Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Space.c | 12
-rw-r--r--  drivers/net/amt.c | 3452
-rw-r--r--  drivers/net/appletalk/Kconfig | 113
-rw-r--r--  drivers/net/appletalk/Makefile | 8
-rw-r--r--  drivers/net/appletalk/cops.c | 1005
-rw-r--r--  drivers/net/appletalk/cops.h | 61
-rw-r--r--  drivers/net/appletalk/cops_ffdrv.h | 532
-rw-r--r--  drivers/net/appletalk/cops_ltdrv.h | 241
-rw-r--r--  drivers/net/appletalk/ipddp.c | 344
-rw-r--r--  drivers/net/appletalk/ipddp.h | 28
-rw-r--r--  drivers/net/appletalk/ltpc.c | 1277
-rw-r--r--  drivers/net/appletalk/ltpc.h | 74
-rw-r--r--  drivers/net/arcnet/Kconfig | 2
-rw-r--r--  drivers/net/arcnet/arc-rawmode.c | 1
-rw-r--r--  drivers/net/arcnet/arc-rimi.c | 1
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 5
-rw-r--r--  drivers/net/arcnet/arcnet.c | 26
-rw-r--r--  drivers/net/arcnet/capmode.c | 1
-rw-r--r--  drivers/net/arcnet/com20020-isa.c | 1
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 108
-rw-r--r--  drivers/net/arcnet/com20020.c | 1
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 12
-rw-r--r--  drivers/net/arcnet/com90io.c | 1
-rw-r--r--  drivers/net/arcnet/com90xx.c | 1
-rw-r--r--  drivers/net/arcnet/rfc1051.c | 1
-rw-r--r--  drivers/net/arcnet/rfc1201.c | 1
-rw-r--r--  drivers/net/bareudp.c | 237
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 386
-rw-r--r--  drivers/net/bonding/bond_alb.c | 74
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 24
-rw-r--r--  drivers/net/bonding/bond_main.c | 1706
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 271
-rw-r--r--  drivers/net/bonding/bond_options.c | 449
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 28
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 166
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 72
-rw-r--r--  drivers/net/bonding/bonding_priv.h | 4
-rw-r--r--  drivers/net/caif/caif_serial.c | 20
-rw-r--r--  drivers/net/caif/caif_virtio.c | 26
-rw-r--r--  drivers/net/can/Kconfig | 183
-rw-r--r--  drivers/net/can/Makefile | 13
-rw-r--r--  drivers/net/can/at91_can.c | 1009
-rw-r--r--  drivers/net/can/bxcan.c | 1101
-rw-r--r--  drivers/net/can/c_can/Kconfig | 3
-rw-r--r--  drivers/net/can/c_can/c_can.h | 20
-rw-r--r--  drivers/net/can/c_can/c_can_ethtool.c | 21
-rw-r--r--  drivers/net/can/c_can/c_can_main.c | 91
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 76
-rw-r--r--  drivers/net/can/can327.c | 1141
-rw-r--r--  drivers/net/can/cc770/Kconfig | 1
-rw-r--r--  drivers/net/can/cc770/cc770.c | 26
-rw-r--r--  drivers/net/can/cc770/cc770_isa.c | 14
-rw-r--r--  drivers/net/can/cc770/cc770_platform.c | 48
-rw-r--r--  drivers/net/can/ctucanfd/Kconfig | 34
-rw-r--r--  drivers/net/can/ctucanfd/Makefile | 10
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd.h | 82
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 1460
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kframe.h | 77
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_kregs.h | 349
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_pci.c | 290
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_platform.c | 126
-rw-r--r--  drivers/net/can/dev/Makefile | 17
-rw-r--r--  drivers/net/can/dev/bittiming.c | 363
-rw-r--r--  drivers/net/can/dev/calc_bittiming.c | 262
-rw-r--r--  drivers/net/can/dev/dev.c | 265
-rw-r--r--  drivers/net/can/dev/length.c | 15
-rw-r--r--  drivers/net/can/dev/netlink.c | 936
-rw-r--r--  drivers/net/can/dev/rx-offload.c | 62
-rw-r--r--  drivers/net/can/dev/skb.c | 176
-rw-r--r--  drivers/net/can/dummy_can.c | 285
-rw-r--r--  drivers/net/can/esd/Kconfig | 12
-rw-r--r--  drivers/net/can/esd/Makefile | 7
-rw-r--r--  drivers/net/can/esd/esd_402_pci-core.c | 515
-rw-r--r--  drivers/net/can/esd/esdacc.c | 769
-rw-r--r--  drivers/net/can/esd/esdacc.h | 358
-rw-r--r--  drivers/net/can/flexcan/Makefile | 7
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c (renamed from drivers/net/can/flexcan.c) | 531
-rw-r--r--  drivers/net/can/flexcan/flexcan-ethtool.c | 110
-rw-r--r--  drivers/net/can/flexcan/flexcan.h | 171
-rw-r--r--  drivers/net/can/grcan.c | 106
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 110
-rw-r--r--  drivers/net/can/janz-ican3.c | 31
-rw-r--r--  drivers/net/can/kvaser_pciefd/Makefile | 3
-rw-r--r--  drivers/net/can/kvaser_pciefd/kvaser_pciefd.h | 96
-rw-r--r--  drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c (renamed from drivers/net/can/kvaser_pciefd.c) | 1651
-rw-r--r--  drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c | 60
-rw-r--r--  drivers/net/can/led.c | 140
-rw-r--r--  drivers/net/can/m_can/Kconfig | 1
-rw-r--r--  drivers/net/can/m_can/m_can.c | 1458
-rw-r--r--  drivers/net/can/m_can/m_can.h | 68
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 33
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 40
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 252
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-regmap.c | 48
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.h | 2
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 24
-rw-r--r--  drivers/net/can/mscan/mscan.c | 30
-rw-r--r--  drivers/net/can/pch_can.c | 1247
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 75
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd_user.h | 4
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 6
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 364
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 1408
-rw-r--r--  drivers/net/can/rockchip/Kconfig | 10
-rw-r--r--  drivers/net/can/rockchip/Makefile | 10
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-core.c | 962
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-ethtool.c | 73
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-rx.c | 299
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-timestamp.c | 105
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-tx.c | 167
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd.h | 553
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 5
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c | 153
-rw-r--r--  drivers/net/can/sja1000/ems_pcmcia.c | 7
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 8
-rw-r--r--  drivers/net/can/sja1000/peak_pcmcia.c | 14
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c | 3
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 168
-rw-r--r--  drivers/net/can/sja1000/sja1000.h | 4
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 14
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 94
-rw-r--r--  drivers/net/can/sja1000/tscan1.c | 7
-rw-r--r--  drivers/net/can/slcan.c | 791
-rw-r--r--  drivers/net/can/slcan/Makefile | 7
-rw-r--r--  drivers/net/can/slcan/slcan-core.c | 953
-rw-r--r--  drivers/net/can/slcan/slcan-ethtool.c | 61
-rw-r--r--  drivers/net/can/slcan/slcan.h | 19
-rw-r--r--  drivers/net/can/softing/softing_cs.c | 2
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 13
-rw-r--r--  drivers/net/can/softing/softing_main.c | 29
-rw-r--r--  drivers/net/can/spi/hi311x.c | 197
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 164
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Kconfig | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Makefile | 7
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c | 119
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 1883
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 8
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c | 145
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c | 162
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h | 62
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 165
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 551
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 333
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 302
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c | 29
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c | 244
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 236
-rw-r--r--  drivers/net/can/sun4i_can.c | 162
-rw-r--r--  drivers/net/can/ti_hecc.c | 54
-rw-r--r--  drivers/net/can/usb/Kconfig | 55
-rw-r--r--  drivers/net/can/usb/Makefile | 4
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 88
-rw-r--r--  drivers/net/can/usb/esd_usb.c | 1398
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 1157
-rw-r--r--  drivers/net/can/usb/etas_es58x/Makefile | 2
-rw-r--r--  drivers/net/can/usb/etas_es58x/es581_4.c | 9
-rw-r--r--  drivers/net/can/usb/etas_es58x/es581_4.h | 2
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 169
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.h | 74
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_devlink.c | 260
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_fd.c | 23
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_fd.h | 2
-rw-r--r--  drivers/net/can/usb/f81604.c | 1204
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 1314
-rw-r--r--  drivers/net/can/usb/kvaser_usb/Makefile | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 122
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 662
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c | 87
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 545
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 944
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 76
-rw-r--r--  drivers/net/can/usb/nct6694_canfd.c | 831
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 93
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 213
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 24
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 148
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 47
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.h | 7
-rw-r--r--  drivers/net/can/usb/ucan.c | 81
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 81
-rw-r--r--  drivers/net/can/vcan.c | 27
-rw-r--r--  drivers/net/can/vxcan.c | 75
-rw-r--r--  drivers/net/can/xilinx_can.c | 385
-rw-r--r--  drivers/net/dsa/Kconfig | 91
-rw-r--r--  drivers/net/dsa/Makefile | 14
-rw-r--r--  drivers/net/dsa/b53/Kconfig | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 1259
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c | 18
-rw-r--r--  drivers/net/dsa/b53/b53_mmap.c | 178
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 246
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h | 93
-rw-r--r--  drivers/net/dsa/b53/b53_serdes.c | 95
-rw-r--r--  drivers/net/dsa/b53/b53_serdes.h | 16
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 22
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 47
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 383
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 18
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 32
-rw-r--r--  drivers/net/dsa/bcm_sf2_regs.h | 65
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 113
-rw-r--r--  drivers/net/dsa/dsa_loop.h | 20
-rw-r--r--  drivers/net/dsa/dsa_loop_bdinfo.c | 35
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 265
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.h | 19
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 41
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 7
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.c | 86
-rw-r--r--  drivers/net/dsa/ks8995.c | 857
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 319
-rw-r--r--  drivers/net/dsa/lan9303_i2c.c | 15
-rw-r--r--  drivers/net/dsa/lan9303_mdio.c | 17
-rw-r--r--  drivers/net/dsa/lantiq/Kconfig | 24
-rw-r--r--  drivers/net/dsa/lantiq/Makefile | 3
-rw-r--r--  drivers/net/dsa/lantiq/lantiq_gswip.c | 518
-rw-r--r--  drivers/net/dsa/lantiq/lantiq_gswip.h | 301
-rw-r--r--  drivers/net/dsa/lantiq/lantiq_gswip_common.c | 1739
-rw-r--r--  drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h) | 9
-rw-r--r--  drivers/net/dsa/lantiq/mxl-gsw1xx.c | 733
-rw-r--r--  drivers/net/dsa/lantiq/mxl-gsw1xx.h | 126
-rw-r--r--  drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h | 154
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 2276
-rw-r--r--  drivers/net/dsa/microchip/Kconfig | 55
-rw-r--r--  drivers/net/dsa/microchip/Makefile | 16
-rw-r--r--  drivers/net/dsa/microchip/ksz8.c | 2115
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 111
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 1806
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 142
-rw-r--r--  drivers/net/dsa/microchip/ksz8863_smi.c | 65
-rw-r--r--  drivers/net/dsa/microchip/ksz8_reg.h (renamed from drivers/net/dsa/microchip/ksz8795_reg.h) | 148
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 1478
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.h | 102
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_acl.c | 1436
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 91
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 134
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 122
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_tc_flower.c | 284
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 5417
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 832
-rw-r--r--  drivers/net/dsa/microchip/ksz_dcb.c | 612
-rw-r--r--  drivers/net/dsa/microchip/ksz_dcb.h | 23
-rw-r--r--  drivers/net/dsa/microchip/ksz_ptp.c | 1188
-rw-r--r--  drivers/net/dsa/microchip/ksz_ptp.h | 89
-rw-r--r--  drivers/net/dsa/microchip/ksz_ptp_reg.h | 142
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 366
-rw-r--r--  drivers/net/dsa/microchip/lan937x.h | 26
-rw-r--r--  drivers/net/dsa/microchip/lan937x_main.c | 671
-rw-r--r--  drivers/net/dsa/microchip/lan937x_reg.h | 217
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c | 265
-rw-r--r--  drivers/net/dsa/mt7530-mmio.c | 100
-rw-r--r--  drivers/net/dsa/mt7530.c | 2781
-rw-r--r--  drivers/net/dsa/mt7530.h | 499
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 52
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Kconfig | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 9
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 3185
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 189
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 140
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 113
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 110
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 322
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 117
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 29
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 65
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.c | 34
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.h | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/leds.c | 848
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c | 191
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6352.c | 390
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-639x.c | 970
-rw-r--r--  drivers/net/dsa/mv88e6xxx/phy.c | 41
-rw-r--r--  drivers/net/dsa/mv88e6xxx/phy.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 167
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 193
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port_hidden.c | 5
-rw-r--r--  drivers/net/dsa/mv88e6xxx/ptp.c | 195
-rw-r--r--  drivers/net/dsa/mv88e6xxx/ptp.h | 135
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 1001
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 137
-rw-r--r--  drivers/net/dsa/mv88e6xxx/smi.c | 35
-rw-r--r--  drivers/net/dsa/mv88e6xxx/switchdev.c | 83
-rw-r--r--  drivers/net/dsa/mv88e6xxx/switchdev.h | 19
-rw-r--r--  drivers/net/dsa/mv88e6xxx/trace.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/trace.h | 96
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 34
-rw-r--r--  drivers/net/dsa/ocelot/Makefile | 13
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 1811
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 70
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 2016
-rw-r--r--  drivers/net/dsa/ocelot/ocelot_ext.c | 112
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 616
-rw-r--r--  drivers/net/dsa/qca/Kconfig | 17
-rw-r--r--  drivers/net/dsa/qca/Makefile | 5
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 141
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 2227
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 1257
-rw-r--r--  drivers/net/dsa/qca/qca8k-leds.c | 487
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 595
-rw-r--r--  drivers/net/dsa/qca/qca8k_leds.h | 16
-rw-r--r--  drivers/net/dsa/qca8k.c | 2198
-rw-r--r--  drivers/net/dsa/qca8k.h | 310
-rw-r--r--  drivers/net/dsa/realtek-smi-core.c | 523
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 52
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 18
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 187
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 408
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek.h (renamed from drivers/net/dsa/realtek-smi-core.h) | 102
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c (renamed from drivers/net/dsa/rtl8365mb.c) | 991
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c (renamed from drivers/net/dsa/rtl8366.c) | 190
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb-leds.c | 177
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c (renamed from drivers/net/dsa/rtl8366rb.c) | 805
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.h | 107
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 331
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.h | 24
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.c | 1319
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.h | 262
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 30
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c | 21
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 16
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 93
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ethtool.c | 10
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 60
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 646
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 174
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 172
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 33
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 28
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 14
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c | 15
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 16
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 1564
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-platform.c | 8
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-spi.c | 18
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx.h | 68
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 117
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 17
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_mdio.c | 2
-rw-r--r--  drivers/net/dsa/yt921x.c | 3006
-rw-r--r--  drivers/net/dsa/yt921x.h | 567
-rw-r--r--  drivers/net/dummy.c | 41
-rw-r--r--  drivers/net/eql.c | 12
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 17
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 19
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 12
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 44
-rw-r--r--  drivers/net/ethernet/8390/8390.c | 1
-rw-r--r--  drivers/net/ethernet/8390/8390.h | 2
-rw-r--r--  drivers/net/ethernet/8390/8390p.c | 1
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 8
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 10
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 13
-rw-r--r--  drivers/net/ethernet/8390/etherh.c | 14
-rw-r--r--  drivers/net/ethernet/8390/hydra.c | 11
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 5
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 10
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 22
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 8
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 28
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 11
-rw-r--r--  drivers/net/ethernet/8390/smc-ultra.c | 9
-rw-r--r--  drivers/net/ethernet/8390/stnic.c | 6
-rw-r--r--  drivers/net/ethernet/8390/wd.c | 9
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 8
-rw-r--r--  drivers/net/ethernet/Kconfig | 50
-rw-r--r--  drivers/net/ethernet/Makefile | 11
-rw-r--r--  drivers/net/ethernet/actions/owl-emac.c | 19
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 14
-rw-r--r--  drivers/net/ethernet/adi/Kconfig | 29
-rw-r--r--  drivers/net/ethernet/adi/Makefile | 6
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 1737
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 18
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 73
-rw-r--r--  drivers/net/ethernet/airoha/Kconfig | 34
-rw-r--r--  drivers/net/ethernet/airoha/Makefile | 9
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c | 3180
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.h | 673
-rw-r--r--  drivers/net/ethernet/airoha/airoha_npu.c | 783
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe.c | 1561
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe_debugfs.c | 187
-rw-r--r--  drivers/net/ethernet/airoha/airoha_regs.h | 923
-rw-r--r--  drivers/net/ethernet/alacritech/slic.h | 14
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 31
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 252
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.h | 18
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 44
-rw-r--r--  drivers/net/ethernet/alteon/acenic.h | 9
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 24
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 558
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 5
-rw-r--r--  drivers/net/ethernet/amazon/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/Makefile | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 162
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 830
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 200
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_debugfs.c | 62
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_debugfs.h | 27
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_devlink.c | 210
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_devlink.h | 21
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 87
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 45
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 402
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 1688
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 152
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_phc.c | 233
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_phc.h | 37
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c | 469
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.h | 151
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 29
-rw-r--r--  drivers/net/ethernet/amd/Makefile | 2
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 25
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 68
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h | 4
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 25
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 22
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 36
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.h | 4
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 8
-rw-r--r--  drivers/net/ethernet/amd/hplance.c | 5
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 12
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 22
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 1249
-rw-r--r--  drivers/net/ethernet/amd/ni65.h | 121
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 24
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 42
-rw-r--r--  drivers/net/ethernet/amd/pds_core/Makefile | 13
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 310
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 256
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 661
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 325
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c | 175
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c | 382
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 179
-rw-r--r--  drivers/net/ethernet/amd/pds_core/fw.c | 197
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c | 609
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 5
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/Makefile | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 227
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 119
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 136
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 126
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 554
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 541
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 241
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c | 399
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 137
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 119
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 223
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 211
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c | 117
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 662
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 175
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pps.c | 74
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 238
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-selftest.c | 346
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-smn.h | 30
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 275
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.c | 10
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/main.h | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mdio.c | 18
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 16
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 51
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 3
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 83
-rw-r--r--  drivers/net/ethernet/apple/mace.c | 29
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 20
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 180
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 193
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 165
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 220
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 53
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 51
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.h | 12
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 552
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 44
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 66
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 170
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 43
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 32
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 25
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 38
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h | 38
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 151
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 18
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h | 4
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/arc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 90
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 38
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 14
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 6
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_ioctl.c | 2
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.c | 40
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.h | 14
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_spi.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 377
-rw-r--r--  drivers/net/ethernet/atheros/alx/ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 30
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 128
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 34
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 119
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 25
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 36
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 1469
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 600
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 464
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 1423
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h | 256
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 117
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 95
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 112
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 131
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/Makefile | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge.h | 255
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_auxr.c | 258
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_auxr.h | 84
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_core.c | 420
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_db.h | 34
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_devlink.c | 306
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_devlink.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c | 548
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h | 112
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c | 1185
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_netdev.c | 2485
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_netdev.h | 454
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_resc.c | 617
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_resc.h | 97
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_rmem.c | 499
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_rmem.h | 202
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 99
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 216
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 243
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8456
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1118
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 677
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 104
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 939
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3241
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 67
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 9526
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c | 241
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 118
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h | 25
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 614
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 106
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 246
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 629
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 80
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 264
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 65
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1588
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 91
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 130
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 135
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 674
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 13
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cs.h | 60
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 38
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_msgq.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_msgq.h | 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_enet.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 29
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 116
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 40
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 42
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 366
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2148
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 146
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/Makefile | 8
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 294
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 23
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 140
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 72
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 16
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 51
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 14
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 27
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 78
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 16
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 117
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 77
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 80
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/common.h | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cphy.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/elmer0.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/espi.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/espi.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/gmac.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/pm3393.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/regs.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.h | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/subr.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/tp.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/adapter.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 96
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.c | 37
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/l2t.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 46
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 100
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 181
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 285
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c | 98
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 121
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 63
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c | 43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 44
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 67
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.c | 58
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/srq.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c | 75
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 88
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 36
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 43
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 189
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/Makefile | 2
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 8
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 15
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 76
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 11
-rw-r--r--  drivers/net/ethernet/cisco/enic/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/cq_desc.h | 40
-rw-r--r--  drivers/net/ethernet/cisco/enic/cq_enet_desc.h | 157
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 148
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.c | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 248
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 892
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_pp.c | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_pp.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.c | 144
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.h | 26
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_rq.c | 436
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_rq.h | 8
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_wq.c | 117
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_wq.h | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/rq_enet_desc.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.c | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.h | 60
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 35
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 34
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_enet.h | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_intr.c | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_intr.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_nic.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_resource.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 16
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rss.h | 14
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_stats.h | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_vic.c | 22
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_vic.h | 19
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 15
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 17
-rw-r--r--  drivers/net/ethernet/cisco/enic/wq_enet_desc.h | 15
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 202
-rw-r--r--  drivers/net/ethernet/cortina/gemini.h | 4
-rw-r--r--  drivers/net/ethernet/davicom/Kconfig | 31
-rw-r--r--  drivers/net/ethernet/davicom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 45
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.c | 1258
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.h | 162
-rw-r--r--  drivers/net/ethernet/dec/tulip/21142.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/dec/tulip/Makefile | 1
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 16
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 5583
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.h | 1017
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 8
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 6
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic2.c | 8
-rw-r--r--  drivers/net/ethernet/dec/tulip/timer.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 82
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 8
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 14
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 132
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.h | 24
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 95
-rw-r--r--  drivers/net/ethernet/dnet.c | 14
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 254
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 137
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 134
-rw-r--r--  drivers/net/ethernet/engleder/Kconfig | 40
-rw-r--r--  drivers/net/ethernet/engleder/Makefile | 10
-rw-r--r--  drivers/net/ethernet/engleder/tsnep.h | 264
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ethtool.c | 480
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_hw.h | 248
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 2715
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ptp.c | 242
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_rxnfc.c | 307
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_selftests.c | 811
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_tc.c | 466
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_xdp.c | 85
-rw-r--r--  drivers/net/ethernet/ethoc.c | 25
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 17
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 416
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.h | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 124
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.h | 12
-rw-r--r--  drivers/net/ethernet/fealnx.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 384
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 52
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 34
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 36
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 281
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 57
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c | 22
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 144
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 1183
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 147
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 194
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 322
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 23
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 43
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 255
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 9
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 452
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.c | 54
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 25
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 15
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 70
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 21
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 1989
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 281
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c | 90
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_hw.h | 232
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_pf.c | 1102
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 99
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 898
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 255
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 121
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 41
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 796
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h | 39
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 439
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf_common.h | 22
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 401
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 53
-rw-r--r--  drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c | 845
-rw-r--r--  drivers/net/ethernet/freescale/enetc/ntmp.c | 457
-rw-r--r--  drivers/net/ethernet/freescale/enetc/ntmp_private.h | 104
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 128
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2185
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 433
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 94
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.h | 34
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 675
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.h | 58
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_keygen.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_keygen.h | 29
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_mac.h | 34
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 924
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.h | 57
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 32
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.h | 32
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.h | 29
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_sp.c | 29
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_sp.h | 28
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 263
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.h | 54
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 728
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 90
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 475
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 43
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 32
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 30
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 26
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 24
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 120
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 70
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 639
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 24
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 273
-rw-r--r--  drivers/net/ethernet/fujitsu/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/ethernet/fungible/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/fungible/Makefile | 7
-rw-r--r--  drivers/net/ethernet/fungible/funcore/Makefile | 5
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.c | 833
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.h | 150
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_hci.h | 1242
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.c | 536
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_queue.h | 174
-rw-r--r--  drivers/net/ethernet/fungible/funeth/Kconfig | 17
-rw-r--r--  drivers/net/ethernet/fungible/funeth/Makefile | 10
-rw-r--r--  drivers/net/ethernet/fungible/funeth/fun_port.h | 97
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth.h | 171
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_devlink.c | 27
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_devlink.h | 13
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 1196
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ktls.c | 155
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ktls.h | 30
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 2069
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_rx.c | 828
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_trace.h | 117
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_tx.c | 800
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_txrx.h | 266
-rw-r--r--  drivers/net/ethernet/google/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/google/gve/Makefile | 5
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 654
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 966
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 317
-rw-r--r--  drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c | 344
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc.h | 24
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc_dqo.h | 11
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h | 55
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 598
-rw-r--r--  drivers/net/ethernet/google/gve/gve_flow_rule.c | 298
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 2059
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ptp.c | 166
-rw-r--r--  drivers/net/ethernet/google/gve/gve_register.h | 9
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 962
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 887
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 496
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 907
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 72
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/Makefile | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 294
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c | 168
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c | 349
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c | 194
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 500
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 399
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h | 63
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c | 145
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 528
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 304
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 302
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 717
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 24
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 39
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 183
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 91
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 75
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 74
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 114
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 144
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 685
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 500
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c | 518
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 133
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c | 115
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h | 39
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1071
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1554
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 644
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 590
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 463
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 178
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 2173
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 645
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 48
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 576
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3692
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 230
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 714
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 92
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 669
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 386
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 27
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 108
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 524
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 211
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1191
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 110
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 121
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 166
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 204
-rw-r--r--  drivers/net/ethernet/huawei/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/huawei/Makefile | 1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c231
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c153
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h175
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c35
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c116
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c41
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c76
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c236
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h57
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h47
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h264
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c557
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c436
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c194
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c421
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c409
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c860
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h141
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h224
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c496
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c385
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h61
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h93
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c885
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h145
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c551
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h104
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c779
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h147
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h87
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c11
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c5
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c22
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h5
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c26
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c284
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h11
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c116
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c55
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c53
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c53
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c1013
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h105
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c1760
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h93
-rw-r--r--drivers/net/ethernet/intel/Kconfig72
-rw-r--r--drivers/net/ethernet/intel/Makefile5
-rw-r--r--drivers/net/ethernet/intel/e100.c61
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c56
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c74
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile10
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h33
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000e_trace.h42
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c250
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h18
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c243
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c512
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c227
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c66
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c95
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c16
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c28
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c129
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h586
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c497
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h171
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c61
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c2937
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c390
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h31
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c58
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debug.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c340
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c286
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.h18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c887
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c91
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_io.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c157
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3857
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c1229
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h719
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c141
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h78
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h75
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c969
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h86
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h253
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c1280
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c253
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h17
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h359
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c167
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h19
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h83
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.c127
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_alloc.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c578
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h169
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c490
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c671
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.c180
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c3194
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_osdep.h9
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c492
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.h47
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_register.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c1134
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h245
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h375
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_types.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c1574
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile37
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c2109
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.h (renamed from drivers/net/ethernet/intel/ice/ice_devlink.h)7
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c551
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c1001
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h58
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h502
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c147
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h1422
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c900
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c3754
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h301
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c381
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c252
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c170
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c2547
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h475
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c756
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c3830
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h145
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c726
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c1355
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.h122
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c2016
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c559
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c204
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c2291
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h388
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c1138
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h267
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c286
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c555
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c403
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.c126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c296
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c2573
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h118
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h425
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c2808
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c4841
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c391
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.c2430
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h538
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c859
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h336
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c3504
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h341
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h558
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c5732
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h743
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c411
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h46
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c848
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c329
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2121
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h189
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h184
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c1735
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c626
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c1231
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h272
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c552
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h253
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c1419
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h400
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c412
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c248
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c5290
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h339
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c438
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c846
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c106
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c884
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h57
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c)36
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c)874
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)2
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c975
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c1922
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c2936
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h140
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig27
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile26
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h1036
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c623
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.h144
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h177
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c171
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c215
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_devids.h10
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c1799
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c503
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h128
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h310
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h128
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c2623
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c392
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h20
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c1021
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h379
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c1183
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c4706
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h1118
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c200
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c4370
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h189
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c673
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h1813
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h451
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c35
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c24
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h80
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c204
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1205
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c321
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h37
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h1
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c135
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c13
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile5
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h268
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c51
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h29
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h132
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c439
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h20
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c42
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c302
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c320
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c1818
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c54
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c63
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c590
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h56
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c509
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h59
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c27
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h179
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c580
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h79
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c638
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c1229
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h767
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h23
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2295
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h39
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c444
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c558
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h133
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c116
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c222
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c518
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h115
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c4043
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h102
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c548
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c122
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1623
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c84
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c581
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c226
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c424
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h370
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1032
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c136
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c650
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c116
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c75
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c90
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h45
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c189
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c327
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c302
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h11
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig15
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile12
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c273
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig25
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile14
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c52
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c126
-rw-r--r--drivers/net/ethernet/jme.c23
-rw-r--r--drivers/net/ethernet/jme.h2
-rw-r--r--drivers/net/ethernet/korina.c25
-rw-r--r--drivers/net/ethernet/lantiq_etop.c119
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c172
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c32
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c187
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c148
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c1190
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h16
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c56
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c20
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c920
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c287
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Kconfig20
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c897
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c925
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h252
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c279
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h182
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c448
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h430
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c463
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c1693
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h414
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c472
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h169
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h390
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h416
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c547
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h224
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c332
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h317
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig19
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c489
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c500
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h160
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c269
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c1220
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h338
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c431
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h166
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h154
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h162
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c511
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h224
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c331
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c676
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c218
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h380
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c172
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h926
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c1617
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.h246
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c277
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h1129
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c932
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h80
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h692
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c408
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c691
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h90
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c495
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h394
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c479
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c449
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c1631
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c815
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1838
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c573
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c615
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c2036
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h237
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c469
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h86
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c50
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h100
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c65
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1042
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c1815
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c450
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c776
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h512
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c489
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c125
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c528
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c213
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c1319
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c281
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c1006
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c624
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c245
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c1774
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.h78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c319
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c879
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h55
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h186
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c907
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h251
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c474
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.h30
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c22
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c208
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c173
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h23
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c532
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h9
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c2152
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h173
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c733
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.c127
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.h17
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c238
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c1645
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c688
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h155
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c34
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c72
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.h12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c722
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c53
-rw-r--r--drivers/net/ethernet/marvell/skge.c33
-rw-r--r--drivers/net/ethernet/marvell/sky2.c138
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig12
-rw-r--r--drivers/net/ethernet/mediatek/Makefile7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c95
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c3918
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h1016
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c815
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h206
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c68
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c340
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h30
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c126
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c559
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c2902
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h226
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c637
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c407
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_ops.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h815
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c491
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h283
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c153
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c121
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c363
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c342
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c385
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h156
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c1059
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c722
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c552
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h564
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dim.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h221
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c194
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.c722
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c972
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c388
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c863
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c420
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c461
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c268
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c337
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c203
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c199
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c338
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c459
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c600
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c460
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c1048
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c337
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c588
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c279
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c1345
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h349
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c3063
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c634
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c328
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c)63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c646
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1800
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c232
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c207
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c989
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c873
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3332
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c840
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c1729
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c1080
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c3910
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c428
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c650
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c746
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c1134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c369
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c186
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2217
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1068
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h447
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3067
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c1582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c1545
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h159
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c655
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c360
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c423
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c1356
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c1073
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c755
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c499
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/events.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c654
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c875
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c2409
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c532
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c1025
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h283
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c609
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c352
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c2651
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h316
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c1226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c487
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c2329
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h831
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1641
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c1293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h941
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h472
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c846
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c1358
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c494
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)633
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c1186
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)274
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)355
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)304
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)513
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)576
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)781
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c263
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)495
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)272
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c256
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c464
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_gpio.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c230
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c284
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h171
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c873
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h241
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c856
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c359
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c186
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c1604
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c918
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c434
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c1192
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2829
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2020
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h264
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c374
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c357
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c173
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c1788
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c172
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c336
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c748
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3466
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c1353
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig38
-rw-r--r--drivers/net/ethernet/meta/Makefile6
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h227
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c149
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h1201
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c271
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c668
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c1924
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c1920
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h326
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c601
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h163
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c313
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c934
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h122
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c857
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h117
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c655
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c311
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c1246
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h232
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c303
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c560
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h158
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c2930
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h201
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h8
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c132
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c17
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c58
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c337
-rw-r--r--drivers/net/ethernet/microchip/Kconfig10
-rw-r--r--drivers/net/ethernet/microchip/Makefile4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c40
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c19
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c17
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h2
-rw-r--r--drivers/net/ethernet/microchip/fdma/Kconfig18
-rw-r--r--drivers/net/ethernet/microchip/fdma/Makefile7
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.c146
-rw-r--r--drivers/net/ethernet/microchip/fdma/fdma_api.h243
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c788
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h95
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c1417
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h390
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c733
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h18
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Kconfig19
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c455
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig24
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile23
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_dcb.c365
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c716
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ets.c96
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c289
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c982
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c50
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h174
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c368
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c592
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1327
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h746
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c551
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c226
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c571
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c1133
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h1888
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c665
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c528
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c85
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c142
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c626
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c91
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c3270
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c244
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c784
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c344
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c136
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig24
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile23
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c357
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c191
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c408
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c89
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c459
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c49
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c459
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h537
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h5660
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c240
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c100
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c107
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c53
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c84
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c54
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pool.c81
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c390
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h111
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c337
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c696
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c583
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h84
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.h247
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c338
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c410
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c174
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h108
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1581
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c204
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c3874
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h20
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c471
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h33
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c2101
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h228
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c75
-rw-r--r--drivers/net/ethernet/microchip/vcap/Kconfig53
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h906
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c3611
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h280
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h284
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c468
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h41
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c554
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c2352
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h124
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c4062
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h18
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c412
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h32
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig6
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h675
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c1187
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c265
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h190
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h540
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c266
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2337
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c409
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c15
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.h21
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c58
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile14
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c2014
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h65
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c892
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.h166
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c306
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mm.c300
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c608
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c67
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c579
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c1026
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c217
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c835
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c685
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c60
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c31
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c22
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c25
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c12
-rw-r--r--drivers/net/ethernet/neterion/Kconfig24
-rw-r--r--drivers/net/ethernet/neterion/Makefile1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c110
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5099
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2086
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1154
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h48
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4832
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h518
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2428
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2290
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netronome/Kconfig13
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm_mbox.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c640
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c114
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h13
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c866
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h178
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c67
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c97
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c504
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c681
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c1413
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h114
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c279
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c409
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c1575
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h136
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c198
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c92
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c147
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h305
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2727
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h164
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c81
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c466
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h219
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c1264
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c127
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c174
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c53
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c23
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h77
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c160
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/dcb.c571
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.h46
-rw-r--r--drivers/net/ethernet/ni/nixge.c200
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c81
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c22
-rw-r--r--drivers/net/ethernet/oa_tc6.c1369
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c68
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c12
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c14
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c16
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/Kconfig2
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c258
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c9
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c711
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h164
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c333
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h442
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c1245
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h105
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c300
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c70
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c45
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c23
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c1363
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c28
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c36
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c73
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c242
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c133
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c186
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c238
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c71
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c62
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h9
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c252
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c159
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c35
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c175
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c113
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h8
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c50
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c28
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c26
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c78
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c40
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c36
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c36
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig15
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c13
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c26
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c19
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.h16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.h29
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c62
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.h15
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c161
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h26
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c27
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c195
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c64
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/rdc/r6040.c13
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c16
-rw-r--r--drivers/net/ethernet/realtek/8139too.c20
-rw-r--r--drivers/net/ethernet/realtek/Kconfig29
-rw-r--r--drivers/net/ethernet/realtek/Makefile4
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/atp.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h40
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c9
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c275
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c2089
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c474
-rw-r--r--drivers/net/ethernet/realtek/rtase/Makefile10
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h362
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c2400
-rw-r--r--drivers/net/ethernet/renesas/Kconfig39
-rw-r--r--drivers/net/ethernet/renesas/Makefile10
-rw-r--r--drivers/net/ethernet/renesas/ravb.h106
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1968
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c35
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c187
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h25
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h1074
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c2295
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c1371
-rw-r--r--drivers/net/ethernet/renesas/rtsn.h464
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c126
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c54
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c26
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c55
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c34
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c105
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c9
-rw-r--r--drivers/net/ethernet/seeq/ether3.c12
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c6
-rw-r--r--drivers/net/ethernet/sfc/Kconfig22
-rw-r--r--drivers/net/ethernet/sfc/Makefile10
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c350
-rw-r--r--drivers/net/ethernet/sfc/ef100.c88
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c17
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c269
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.h9
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c649
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h20
-rw-r--r--drivers/net/ethernet/sfc/ef100_regs.h83
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c499
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h80
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c74
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c72
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h14
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c114
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h5
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c26
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h2
-rw-r--r--drivers/net/ethernet/sfc/efx.c314
-rw-r--r--drivers/net/ethernet/sfc/efx.h15
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c347
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h10
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c167
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h22
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c746
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.h47
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c56
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c385
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h27
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c51
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c135
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c28
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h9
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c19
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.c58
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c11
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/filter.h49
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/io.h110
-rw-r--r--drivers/net/ethernet/sfc/mae.c2338
-rw-r--r--drivers/net/ethernet/sfc/mae.h138
-rw-r--r--drivers/net/ethernet/sfc/mae_counter_format.h73
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c338
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h91
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c141
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h9
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h18844
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol_mae.h24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c31
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h273
-rw-r--r--drivers/net/ethernet/sfc/nic.c167
-rw-r--r--drivers/net/ethernet/sfc/nic.h182
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h21
-rw-r--r--drivers/net/ethernet/sfc/ptp.c635
-rw-r--r--drivers/net/ethernet/sfc/ptp.h16
-rw-r--r--drivers/net/ethernet/sfc/rx.c27
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c140
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h14
-rw-r--r--drivers/net/ethernet/sfc/selftest.c65
-rw-r--r--drivers/net/ethernet/sfc/siena/Kconfig46
-rw-r--r--drivers/net/ethernet/sfc/siena/Makefile11
-rw-r--r--drivers/net/ethernet/sfc/siena/bitfield.h614
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c1350
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.h218
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c1371
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.h45
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c1398
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.h118
-rw-r--r--drivers/net/ethernet/sfc/siena/enum.h176
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c277
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c1232
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h57
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c (renamed from drivers/net/ethernet/sfc/farch.c)85
-rw-r--r--drivers/net/ethernet/sfc/siena/farch_regs.h (renamed from drivers/net/ethernet/sfc/farch_regs.h)0
-rw-r--r--drivers/net/ethernet/sfc/siena/filter.h309
-rw-r--r--drivers/net/ethernet/sfc/siena/io.h310
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2260
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.h386
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_mon.c531
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h17204
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.c110
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.h17
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.c1282
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.h58
-rw-r--r--drivers/net/ethernet/sfc/siena/mtd.c124
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h1695
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c530
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.h206
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h252
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2196
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h48
-rw-r--r--drivers/net/ethernet/sfc/siena/rx.c400
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c1037
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h106
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.c819
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.h52
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c (renamed from drivers/net/ethernet/sfc/siena.c)180
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.c (renamed from drivers/net/ethernet/sfc/siena_sriov.c)35
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.h (renamed from drivers/net/ethernet/sfc/siena_sriov.h)9
-rw-r--r--drivers/net/ethernet/sfc/siena/sriov.h83
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c392
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.h40
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.c448
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.h39
-rw-r--r--drivers/net/ethernet/sfc/siena/vfdi.h (renamed from drivers/net/ethernet/sfc/vfdi.h)0
-rw-r--r--drivers/net/ethernet/sfc/siena/workarounds.h28
-rw-r--r--drivers/net/ethernet/sfc/sriov.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c3094
-rw-r--r--drivers/net/ethernet/sfc/tc.h359
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.c241
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.h43
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c625
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.h55
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c563
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.h66
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c753
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.h114
-rw-r--r--drivers/net/ethernet/sfc/tx.c77
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c71
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h7
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c8
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h7
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c20
-rw-r--r--drivers/net/ethernet/sgi/meth.c6
-rw-r--r--drivers/net/ethernet/sis/Kconfig4
-rw-r--r--drivers/net/ethernet/sis/sis190.c12
-rw-r--r--drivers/net/ethernet/sis/sis900.c21
-rw-r--r--drivers/net/ethernet/smsc/Kconfig20
-rw-r--r--drivers/net/ethernet/smsc/Makefile1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c16
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2193
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h901
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c7
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c19
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h38
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c51
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c18
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c58
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c37
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig142
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c265
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h199
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c269
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c277
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c222
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c134
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c526
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c651
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c240
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c59
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c422
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c188
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c797
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1875
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c186
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c385
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c151
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c241
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c489
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c199
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c403
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c286
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c247
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c258
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h178
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c677
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c252
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h107
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c227
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c74
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h121
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c438
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c177
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c290
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h365
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c141
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h175
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c179
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c574
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c314
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c142
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c3944
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c726
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c127
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h76
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c497
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c219
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c566
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c375
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/cassini.c130
-rw-r--r--drivers/net/ethernet/sun/cassini.h2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c21
-rw-r--r--drivers/net/ethernet/sun/niu.c156
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c14
-rw-r--r--drivers/net/ethernet/sun/sungem.c38
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1721
-rw-r--r--drivers/net/ethernet/sun/sunhme.h6
-rw-r--r--drivers/net/ethernet/sun/sunqe.c10
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c50
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c19
-rw-r--r--drivers/net/ethernet/sunplus/Kconfig32
-rw-r--r--drivers/net/ethernet/sunplus/Makefile6
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_define.h270
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.c228
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.h19
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c563
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.c273
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.h13
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.c274
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.h18
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c125
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.c90
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_register.h86
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c11
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c6
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c14
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c5
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h3
-rw-r--r--drivers/net/ethernet/tehuti/Kconfig15
-rw-r--r--drivers/net/ethernet/tehuti/Makefile3
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c58
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.h2
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c1857
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h266
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c220
-rw-r--r--drivers/net/ethernet/tehuti/tn40_phy.c76
-rw-r--r--drivers/net/ethernet/tehuti/tn40_regs.h245
-rw-r--r--drivers/net/ethernet/ti/Kconfig84
-rw-r--r--drivers/net/ethernet/ti/Makefile25
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c364
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2318
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h92
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c963
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h198
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c449
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h26
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1247
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c117
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c445
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h66
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c39
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c118
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c317
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h32
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpts.c43
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c166
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c310
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c1107
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h112
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c469
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c1853
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c864
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h335
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c321
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c124
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_rt.h151
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c2364
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h526
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c1250
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_queues.c52
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c95
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h204
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h270
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.c477
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switchdev.h13
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c56
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.h6
-rw-r--r--drivers/net/ethernet/ti/netcp.h7
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c90
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c103
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c268
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h36
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2555
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h476
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c172
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c21
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c73
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig25
-rw-r--r--drivers/net/ethernet/vertexcom/Makefile6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c806
-rw-r--r--drivers/net/ethernet/via/Kconfig1
-rw-r--r--drivers/net/ethernet/via/via-rhine.c31
-rw-r--r--drivers/net/ethernet/via/via-velocity.c30
-rw-r--r--drivers/net/ethernet/via/via-velocity.h5
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig103
-rw-r--r--drivers/net/ethernet/wangxun/Makefile10
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c762
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h60
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c2977
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h63
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c3358
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h45
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c419
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h99
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c905
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c929
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h1507
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h129
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c292
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c155
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c99
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h13
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c889
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c203
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h11
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h146
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c266
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c530
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c606
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h11
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c648
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h20
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c227
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h12
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c268
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c986
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c676
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h11
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h482
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c331
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c16
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c20
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c14
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h186
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c128
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h334
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2364
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c106
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c142
-rw-r--r--drivers/net/ethernet/xircom/Kconfig2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c17
-rw-r--r--drivers/net/ethernet/xscale/Kconfig4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c245
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c22
-rw-r--r--drivers/net/fddi/defxx.c24
-rw-r--r--drivers/net/fddi/defza.c12
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c6
-rw-r--r--drivers/net/fddi/skfp/rmt.c6
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fddi/skfp/smt.c16
-rw-r--r--drivers/net/fjes/fjes_ethtool.c70
-rw-r--r--drivers/net/fjes/fjes_hw.c37
-rw-r--r--drivers/net/fjes/fjes_main.c1160
-rw-r--r--drivers/net/fjes/fjes_trace.h12
-rw-r--r--drivers/net/geneve.c553
-rw-r--r--drivers/net/gtp.c1452
-rw-r--r--drivers/net/hamradio/6pack.c153
-rw-r--r--drivers/net/hamradio/Kconfig57
-rw-r--r--drivers/net/hamradio/Makefile1
-rw-r--r--drivers/net/hamradio/baycom_epp.c22
-rw-r--r--drivers/net/hamradio/baycom_par.c5
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c3
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c31
-rw-r--r--drivers/net/hamradio/dmascc.c1449
-rw-r--r--drivers/net/hamradio/hdlcdrv.c5
-rw-r--r--drivers/net/hamradio/mkiss.c15
-rw-r--r--drivers/net/hamradio/scc.c51
-rw-r--r--drivers/net/hamradio/yam.c8
-rw-r--r--drivers/net/hippi/rrunner.c7
-rw-r--r--drivers/net/hyperv/Kconfig3
-rw-r--r--drivers/net/hyperv/hyperv_net.h108
-rw-r--r--drivers/net/hyperv/netvsc.c262
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c103
-rw-r--r--drivers/net/hyperv/netvsc_drv.c588
-rw-r--r--drivers/net/hyperv/netvsc_trace.h8
-rw-r--r--drivers/net/hyperv/rndis_filter.c95
-rw-r--r--drivers/net/ieee802154/Kconfig8
-rw-r--r--drivers/net/ieee802154/adf7242.c17
-rw-r--r--drivers/net/ieee802154/at86rf230.c277
-rw-r--r--drivers/net/ieee802154/atusb.c258
-rw-r--r--drivers/net/ieee802154/ca8210.c323
-rw-r--r--drivers/net/ieee802154/cc2520.c145
-rw-r--r--drivers/net/ieee802154/fakelb.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c189
-rw-r--r--drivers/net/ieee802154/mcr20a.c30
-rw-r--r--drivers/net/ieee802154/mrf24j40.c8
-rw-r--r--drivers/net/ifb.c167
-rw-r--r--drivers/net/ipa/Kconfig4
-rw-r--r--drivers/net/ipa/Makefile23
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)35
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)47
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)33
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)33
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)40
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c405
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)33
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c481
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c487
-rw-r--r--drivers/net/ipa/gsi.c954
-rw-r--r--drivers/net/ipa/gsi.h87
-rw-r--r--drivers/net/ipa/gsi_private.h43
-rw-r--r--drivers/net/ipa/gsi_reg.c162
-rw-r--r--drivers/net/ipa/gsi_reg.h629
-rw-r--r--drivers/net/ipa/gsi_trans.c452
-rw-r--r--drivers/net/ipa/gsi_trans.h63
-rw-r--r--drivers/net/ipa/ipa.h58
-rw-r--r--drivers/net/ipa/ipa_cmd.c222
-rw-r--r--drivers/net/ipa/ipa_cmd.h51
-rw-r--r--drivers/net/ipa/ipa_data.h81
-rw-r--r--drivers/net/ipa/ipa_endpoint.c1532
-rw-r--r--drivers/net/ipa/ipa_endpoint.h128
-rw-r--r--drivers/net/ipa/ipa_gsi.c7
-rw-r--r--drivers/net/ipa/ipa_interrupt.c292
-rw-r--r--drivers/net/ipa/ipa_interrupt.h98
-rw-r--r--drivers/net/ipa/ipa_main.c558
-rw-r--r--drivers/net/ipa/ipa_mem.c124
-rw-r--r--drivers/net/ipa/ipa_mem.h15
-rw-r--r--drivers/net/ipa/ipa_modem.c155
-rw-r--r--drivers/net/ipa/ipa_modem.h8
-rw-r--r--drivers/net/ipa/ipa_power.c355
-rw-r--r--drivers/net/ipa/ipa_power.h35
-rw-r--r--drivers/net/ipa/ipa_qmi.c31
-rw-r--r--drivers/net/ipa/ipa_qmi.h4
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c31
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h62
-rw-r--r--drivers/net/ipa/ipa_reg.c143
-rw-r--r--drivers/net/ipa/ipa_reg.h1191
-rw-r--r--drivers/net/ipa/ipa_resource.c68
-rw-r--r--drivers/net/ipa/ipa_smp2p.c62
-rw-r--r--drivers/net/ipa/ipa_smp2p.h16
-rw-r--r--drivers/net/ipa/ipa_sysfs.c90
-rw-r--r--drivers/net/ipa/ipa_sysfs.h5
-rw-r--r--drivers/net/ipa/ipa_table.c506
-rw-r--r--drivers/net/ipa/ipa_table.h34
-rw-r--r--drivers/net/ipa/ipa_uc.c54
-rw-r--r--drivers/net/ipa/ipa_uc.h9
-rw-r--r--drivers/net/ipa/ipa_version.h25
-rw-r--r--drivers/net/ipa/reg.h136
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c293
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c305
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c310
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c315
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c313
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c314
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c319
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c436
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c446
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c502
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c460
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c523
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c494
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c499
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.0.c566
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.5.c565
-rw-r--r--drivers/net/ipvlan/ipvlan.h14
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c116
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c12
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c84
-rw-r--r--drivers/net/ipvlan/ipvtap.c16
-rw-r--r--drivers/net/loopback.c45
-rw-r--r--drivers/net/macsec.c938
-rw-r--r--drivers/net/macvlan.c284
-rw-r--r--drivers/net/macvtap.c22
-rw-r--r--drivers/net/mctp/Kconfig54
-rw-r--r--drivers/net/mctp/Makefile4
-rw-r--r--drivers/net/mctp/mctp-i2c.c1146
-rw-r--r--drivers/net/mctp/mctp-i3c.c767
-rw-r--r--drivers/net/mctp/mctp-serial.c634
-rw-r--r--drivers/net/mctp/mctp-usb.c390
-rw-r--r--drivers/net/mdio.c172
-rw-r--r--drivers/net/mdio/Kconfig73
-rw-r--r--drivers/net/mdio/Makefile4
-rw-r--r--drivers/net/mdio/acpi_mdio.c11
-rw-r--r--drivers/net/mdio/fwnode_mdio.c97
-rw-r--r--drivers/net/mdio/mdio-airoha.c278
-rw-r--r--drivers/net/mdio/mdio-aspeed.c125
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c4
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c133
-rw-r--r--drivers/net/mdio/mdio-bitbang.c78
-rw-r--r--drivers/net/mdio/mdio-cavium.c111
-rw-r--r--drivers/net/mdio/mdio-cavium.h9
-rw-r--r--drivers/net/mdio/mdio-gpio.c11
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c4
-rw-r--r--drivers/net/mdio/mdio-i2c.c417
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c272
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c15
-rw-r--r--drivers/net/mdio/mdio-moxart.c4
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c290
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c64
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c8
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c6
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c81
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c163
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c70
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c13
-rw-r--r--drivers/net/mdio/mdio-mux.c65
-rw-r--r--drivers/net/mdio/mdio-mvusb.c17
-rw-r--r--drivers/net/mdio/mdio-octeon.c34
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-regmap.c93
-rw-r--r--drivers/net/mdio/mdio-sun4i.c4
-rw-r--r--drivers/net/mdio/mdio-thunder.c19
-rw-r--r--drivers/net/mdio/mdio-xgene.c35
-rw-r--r--drivers/net/mdio/of_mdio.c107
-rw-r--r--drivers/net/mhi_net.c13
-rw-r--r--drivers/net/mii.c3
-rw-r--r--drivers/net/net_failover.c20
-rw-r--r--drivers/net/netconsole.c1536
-rw-r--r--drivers/net/netdevsim/Makefile10
-rw-r--r--drivers/net/netdevsim/bpf.c20
-rw-r--r--drivers/net/netdevsim/bus.c371
-rw-r--r--drivers/net/netdevsim/dev.c621
-rw-r--r--drivers/net/netdevsim/ethtool.c89
-rw-r--r--drivers/net/netdevsim/fib.c121
-rw-r--r--drivers/net/netdevsim/health.c140
-rw-r--r--drivers/net/netdevsim/hwstats.c480
-rw-r--r--drivers/net/netdevsim/ipsec.c65
-rw-r--r--drivers/net/netdevsim/macsec.c351
-rw-r--r--drivers/net/netdevsim/netdev.c964
-rw-r--r--drivers/net/netdevsim/netdevsim.h188
-rw-r--r--drivers/net/netdevsim/psp.c252
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c35
-rw-r--r--drivers/net/netkit.c1017
-rw-r--r--drivers/net/nlmon.c28
-rw-r--r--drivers/net/ntb_netdev.c29
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c465
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c263
-rw-r--r--drivers/net/ovpn/netlink-gen.h48
-rw-r--r--drivers/net/ovpn/netlink.c1293
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c241
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c619
-rw-r--r--drivers/net/ovpn/tcp.h37
-rw-r--r--drivers/net/ovpn/udp.c448
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pcs/Kconfig24
-rw-r--r--drivers/net/pcs/Makefile5
-rw-r--r--drivers/net/pcs/pcs-lynx.c317
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c329
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c770
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c24
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c457
-rw-r--r--drivers/net/pcs/pcs-xpcs-wx.c197
-rw-r--r--drivers/net/pcs/pcs-xpcs.c1427
-rw-r--r--drivers/net/pcs/pcs-xpcs.h82
-rw-r--r--drivers/net/pfcp.c298
-rw-r--r--drivers/net/phy/Kconfig185
-rw-r--r--drivers/net/phy/Makefile57
-rw-r--r--drivers/net/phy/adin.c101
-rw-r--r--drivers/net/phy/adin1100.c354
-rw-r--r--drivers/net/phy/air_en8811h.c1224
-rw-r--r--drivers/net/phy/amd.c35
-rw-r--r--drivers/net/phy/aquantia.h16
-rw-r--r--drivers/net/phy/aquantia/Kconfig6
-rw-r--r--drivers/net/phy/aquantia/Makefile6
-rw-r--r--drivers/net/phy/aquantia/aquantia.h259
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c385
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c (renamed from drivers/net/phy/aquantia_hwmon.c)48
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c160
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c1593
-rw-r--r--drivers/net/phy/aquantia_main.c708
-rw-r--r--drivers/net/phy/as21xxx.c1088
-rw-r--r--drivers/net/phy/at803x.c2070
-rw-r--r--drivers/net/phy/ax88796b.c13
-rw-r--r--drivers/net/phy/ax88796b_rust.rs134
-rw-r--r--drivers/net/phy/bcm-cygnus.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c382
-rw-r--r--drivers/net/phy/bcm-phy-lib.h38
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c957
-rw-r--r--drivers/net/phy/bcm54140.c21
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c30
-rw-r--r--drivers/net/phy/bcm84881.c28
-rw-r--r--drivers/net/phy/bcm87xx.c51
-rw-r--r--drivers/net/phy/broadcom.c1019
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/cortina.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/dp83640.c159
-rw-r--r--drivers/net/phy/dp83822.c737
-rw-r--r--drivers/net/phy/dp83848.c4
-rw-r--r--drivers/net/phy/dp83867.c440
-rw-r--r--drivers/net/phy/dp83869.c96
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/dp83td510.c977
-rw-r--r--drivers/net/phy/dp83tg720.c678
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c285
-rw-r--r--drivers/net/phy/icplus.c11
-rw-r--r--drivers/net/phy/intel-xway.c262
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c1154
-rw-r--r--drivers/net/phy/marvell-88x2222.c23
-rw-r--r--drivers/net/phy/marvell.c1066
-rw-r--r--drivers/net/phy/marvell10g.c376
-rw-r--r--drivers/net/phy/mdio-boardinfo.c80
-rw-r--r--drivers/net/phy/mdio-boardinfo.h23
-rw-r--r--drivers/net/phy/mdio-open-alliance.h95
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c749
-rw-r--r--drivers/net/phy/mdio_bus_provider.c442
-rw-r--r--drivers/net/phy/mdio_device.c74
-rw-r--r--drivers/net/phy/mdio_devres.c12
-rw-r--r--drivers/net/phy/mediatek-ge.c116
-rw-r--r--drivers/net/phy/mediatek/Kconfig40
-rw-r--r--drivers/net/phy/mediatek/Makefile5
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c413
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c1548
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c142
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c347
-rw-r--r--drivers/net/phy/mediatek/mtk.h104
-rw-r--r--drivers/net/phy/meson-gxl.c84
-rw-r--r--drivers/net/phy/micrel.c5213
-rw-r--r--drivers/net/phy/microchip.c240
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c1306
-rw-r--r--drivers/net/phy/microchip_rds_ptp.h247
-rw-r--r--drivers/net/phy/microchip_t1.c2062
-rw-r--r--drivers/net/phy/microchip_t1s.c610
-rw-r--r--drivers/net/phy/motorcomm.c2976
-rw-r--r--drivers/net/phy/mscc/mscc.h39
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c163
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c680
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c177
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/mxl-86110.c978
-rw-r--r--drivers/net/phy/mxl-gpy.c786
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/ncn26000.c171
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c1725
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c1578
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.h63
-rw-r--r--drivers/net/phy/nxp-cbtx.c227
-rw-r--r--drivers/net/phy/nxp-tja11xx.c155
-rw-r--r--drivers/net/phy/open_alliance_helpers.c77
-rw-r--r--drivers/net/phy/open_alliance_helpers.h47
-rw-r--r--drivers/net/phy/phy-c45.c1318
-rw-r--r--drivers/net/phy/phy-caps.h64
-rw-r--r--drivers/net/phy/phy-core.c457
-rw-r--r--drivers/net/phy/phy.c1313
-rw-r--r--drivers/net/phy/phy_caps.c380
-rw-r--r--drivers/net/phy/phy_device.c1465
-rw-r--r--drivers/net/phy/phy_led_triggers.c25
-rw-r--r--drivers/net/phy/phy_link_topology.c105
-rw-r--r--drivers/net/phy/phy_package.c419
-rw-r--r--drivers/net/phy/phylib-internal.h31
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c2800
-rw-r--r--drivers/net/phy/qcom/Kconfig31
-rw-r--r--drivers/net/phy/qcom/Makefile6
-rw-r--r--drivers/net/phy/qcom/at803x.c1250
-rw-r--r--drivers/net/phy/qcom/qca807x.c868
-rw-r--r--drivers/net/phy/qcom/qca808x.c686
-rw-r--r--drivers/net/phy/qcom/qca83xx.c269
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c776
-rw-r--r--drivers/net/phy/qcom/qcom.h271
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/qt2025.rs111
-rw-r--r--drivers/net/phy/realtek.c1044
-rw-r--r--drivers/net/phy/realtek/Kconfig15
-rw-r--r--drivers/net/phy/realtek/Makefile4
-rw-r--r--drivers/net/phy/realtek/realtek.h10
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c74
-rw-r--r--drivers/net/phy/realtek/realtek_main.c2277
-rw-r--r--drivers/net/phy/rockchip.c2
-rw-r--r--drivers/net/phy/sfp-bus.c339
-rw-r--r--drivers/net/phy/sfp.c1246
-rw-r--r--drivers/net/phy/sfp.h13
-rw-r--r--drivers/net/phy/smsc.c576
-rw-r--r--drivers/net/phy/spi_ks8995.c549
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/stubs.c10
-rw-r--r--drivers/net/phy/teranetics.c5
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c181
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c17
-rw-r--r--drivers/net/plip/plip.c12
-rw-r--r--drivers/net/ppp/Kconfig37
-rw-r--r--drivers/net/ppp/bsd_comp.c5
-rw-r--r--drivers/net/ppp/ppp_async.c47
-rw-r--r--drivers/net/ppp/ppp_deflate.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c356
-rw-r--r--drivers/net/ppp/ppp_mppe.c110
-rw-r--r--drivers/net/ppp/ppp_synctty.c50
-rw-r--r--drivers/net/ppp/pppoe.c175
-rw-r--r--drivers/net/ppp/pptp.c64
-rw-r--r--drivers/net/pse-pd/Kconfig55
-rw-r--r--drivers/net/pse-pd/Makefile9
-rw-r--r--drivers/net/pse-pd/pd692x0.c1883
-rw-r--r--drivers/net/pse-pd/pse_core.c1978
-rw-r--r--drivers/net/pse-pd/pse_regulator.c155
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c1563
-rw-r--r--drivers/net/rionet.c15
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/slip/slhc.c60
-rw-r--r--drivers/net/slip/slip.c38
-rw-r--r--drivers/net/slip/slip.h2
-rw-r--r--drivers/net/sungem_phy.c47
-rw-r--r--drivers/net/tap.c296
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team_core.c (renamed from drivers/net/team/team.c)457
-rw-r--r--drivers/net/team/team_mode_activebackup.c11
-rw-r--r--drivers/net/team/team_mode_broadcast.c1
-rw-r--r--drivers/net/team/team_mode_loadbalance.c67
-rw-r--r--drivers/net/team/team_mode_random.c2
-rw-r--r--drivers/net/team/team_mode_roundrobin.c1
-rw-r--r--drivers/net/team/team_nl.c60
-rw-r--r--drivers/net/team/team_nl.h30
-rw-r--r--drivers/net/thunderbolt/Kconfig12
-rw-r--r--drivers/net/thunderbolt/Makefile6
-rw-r--r--drivers/net/thunderbolt/main.c (renamed from drivers/net/thunderbolt.c)201
-rw-r--r--drivers/net/thunderbolt/trace.c10
-rw-r--r--drivers/net/thunderbolt/trace.h141
-rw-r--r--drivers/net/tun.c755
-rw-r--r--drivers/net/tun_vnet.h269
-rw-r--r--drivers/net/usb/Kconfig25
-rw-r--r--drivers/net/usb/aqc111.c41
-rw-r--r--drivers/net/usb/asix.h20
-rw-r--r--drivers/net/usb/asix_common.c150
-rw-r--r--drivers/net/usb/asix_devices.c359
-rw-r--r--drivers/net/usb/ax88172a.c16
-rw-r--r--drivers/net/usb/ax88179_178a.c615
-rw-r--r--drivers/net/usb/catc.c56
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ether.c161
-rw-r--r--drivers/net/usb/cdc_mbim.c13
-rw-r--r--drivers/net/usb/cdc_ncm.c148
-rw-r--r--drivers/net/usb/cdc_subset.c10
-rw-r--r--drivers/net/usb/ch9200.c7
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/gl620a.c6
-rw-r--r--drivers/net/usb/hso.c38
-rw-r--r--drivers/net/usb/ipheth.c227
-rw-r--r--drivers/net/usb/kalmia.c8
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c3296
-rw-r--r--drivers/net/usb/mcs7830.c12
-rw-r--r--drivers/net/usb/net1080.c2
-rw-r--r--drivers/net/usb/pegasus.c6
-rw-r--r--drivers/net/usb/plusb.c12
-rw-r--r--drivers/net/usb/qmi_wwan.c68
-rw-r--r--drivers/net/usb/r8152.c1141
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/usb/rndis_host.c57
-rw-r--r--drivers/net/usb/rtl8150.c42
-rw-r--r--drivers/net/usb/sierra_net.c16
-rw-r--r--drivers/net/usb/smsc75xx.c28
-rw-r--r--drivers/net/usb/smsc95xx.c611
-rw-r--r--drivers/net/usb/sr9700.c29
-rw-r--r--drivers/net/usb/sr9800.c8
-rw-r--r--drivers/net/usb/sr9800.h2
-rw-r--r--drivers/net/usb/usbnet.c526
-rw-r--r--drivers/net/usb/zaurus.c33
-rw-r--r--drivers/net/veth.c648
-rw-r--r--drivers/net/virtio_net.c5724
-rw-r--r--drivers/net/vmxnet3/Makefile4
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h139
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c915
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c324
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h92
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c429
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.h47
-rw-r--r--drivers/net/vrf.c195
-rw-r--r--drivers/net/vsockmon.c25
-rw-r--r--drivers/net/vxlan/Makefile7
-rw-r--r--drivers/net/vxlan/vxlan_core.c (renamed from drivers/net/vxlan.c)2435
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c1746
-rw-r--r--drivers/net/vxlan/vxlan_multicast.c272
-rw-r--r--drivers/net/vxlan/vxlan_private.h251
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c1005
-rw-r--r--drivers/net/wan/Kconfig91
-rw-r--r--drivers/net/wan/Makefile8
-rw-r--r--drivers/net/wan/cosa.c2052
-rw-r--r--drivers/net/wan/cosa.h104
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/farsync.h2
-rw-r--r--drivers/net/wan/framer/Kconfig42
-rw-r--r--drivers/net/wan/framer/Makefile7
-rw-r--r--drivers/net/wan/framer/framer-core.c873
-rw-r--r--drivers/net/wan/framer/pef2256/Makefile8
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256-regs.h250
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c893
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c808
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c99
-rw-r--r--drivers/net/wan/hd64572.c3
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/hdlc_ppp.c8
-rw-r--r--drivers/net/wan/hostess_sv11.c336
-rw-r--r--drivers/net/wan/ixp4xx_hss.c305
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/Makefile18
-rw-r--r--drivers/net/wan/lmc/lmc.h33
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c65
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h52
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h255
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2008
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1206
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c106
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h18
-rw-r--r--drivers/net/wan/lmc/lmc_var.h468
-rw-r--r--drivers/net/wan/sealevel.c352
-rw-r--r--drivers/net/wan/slic_ds26522.c6
-rw-r--r--drivers/net/wan/z85230.c1641
-rw-r--r--drivers/net/wan/z85230.h407
-rw-r--r--drivers/net/wireguard/Makefile2
-rw-r--r--drivers/net/wireguard/allowedips.c117
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c24
-rw-r--r--drivers/net/wireguard/device.c118
-rw-r--r--drivers/net/wireguard/device.h9
-rw-r--r--drivers/net/wireguard/generated/netlink.c73
-rw-r--r--drivers/net/wireguard/generated/netlink.h30
-rw-r--r--drivers/net/wireguard/main.c10
-rw-r--r--drivers/net/wireguard/netlink.c147
-rw-r--r--drivers/net/wireguard/noise.c59
-rw-r--r--drivers/net/wireguard/peer.c3
-rw-r--r--drivers/net/wireguard/peer.h2
-rw-r--r--drivers/net/wireguard/queueing.c8
-rw-r--r--drivers/net/wireguard/queueing.h44
-rw-r--r--drivers/net/wireguard/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/receive.c70
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c103
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/send.c7
-rw-r--r--drivers/net/wireguard/socket.c11
-rw-r--r--drivers/net/wireguard/timers.c47
-rw-r--r--drivers/net/wireless/Kconfig78
-rw-r--r--drivers/net/wireless/Makefile15
-rw-r--r--drivers/net/wireless/admtek/adm8211.c11
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c66
-rw-r--r--drivers/net/wireless/ath/ath.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c104
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c363
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h49
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h16
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c69
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c30
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c156
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h439
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c394
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c120
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c85
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h99
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c89
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c582
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c115
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c66
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c127
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h103
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h43
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c80
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c255
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h78
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c39
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c162
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h141
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig3
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c725
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c82
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c1765
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h407
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c69
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h56
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c1078
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h178
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c217
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h159
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c221
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c103
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h121
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1478
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c391
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c171
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.h27
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c198
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h96
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c600
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h234
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c120
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h27
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c782
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h129
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c4600
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h26
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c461
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h21
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.c149
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1366
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h43
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c865
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c418
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c816
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h69
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c539
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c87
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c433
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode_i.h50
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h238
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c3565
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h1493
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c810
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.h46
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig62
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile35
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c510
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h114
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1156
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c1051
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h193
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c2321
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h1519
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/coredump.h81
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c358
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c104
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1515
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h147
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c6178
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h2076
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1841
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1976
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c4438
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h107
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c4830
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h191
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1781
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c178
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h37
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2663
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1181
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h3045
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c947
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h1171
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h200
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h165
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c795
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.h316
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1680
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h382
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c14996
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h209
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c659
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h47
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c147
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c1895
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h153
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c570
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h123
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c3917
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h635
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c994
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h123
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h1544
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h179
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c11230
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h6526
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1029
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.h62
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c64
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c39
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c45
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c38
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h25
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c24
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c24
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h18
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h60
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c99
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c90
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c151
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h95
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c62
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c119
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c188
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c73
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c168
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c70
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c90
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h158
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c206
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c28
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c90
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c28
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c61
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c23
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/key.c6
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/regd_common.h3
-rw-r--r--drivers/net/wireless/ath/spectral_common.h4
-rw-r--r--drivers/net/wireless/ath/testmode_i.h66
-rw-r--r--drivers/net/wireless/ath/trace.h11
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c180
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.c125
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.h84
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h218
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c397
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c13
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c448
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h14
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c229
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h30
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c59
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c68
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h5
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c48
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c37
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h10
-rw-r--r--drivers/net/wireless/atmel/Kconfig35
-rw-r--r--drivers/net/wireless/atmel/Makefile4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c79
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/atmel/atmel.c4528
-rw-r--r--drivers/net/wireless/atmel/atmel.h31
-rw-r--r--drivers/net/wireless/atmel/atmel_cs.c283
-rw-r--r--drivers/net/wireless/atmel/atmel_pci.c65
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h18
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.c27
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c54
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c12
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.c26
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c17
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c51
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c39
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c198
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h58
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c1332
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h31
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c49
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c238
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c86
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c373
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c66
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c172
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c167
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c134
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h155
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h198
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c200
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h88
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c79
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c892
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c117
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c113
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c73
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c453
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c55
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pub.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/scb.h14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h18
-rw-r--r--drivers/net/wireless/cisco/Kconfig59
-rw-r--r--drivers/net/wireless/cisco/Makefile3
-rw-r--r--drivers/net/wireless/cisco/airo.c8263
-rw-r--r--drivers/net/wireless/cisco/airo.h10
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c218
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Makefile7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c122
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c265
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h11
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h175
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto.c246
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_ccmp.c438
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_tkip.c728
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto_wep.c247
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c38
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c112
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_spy.c233
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c9
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c43
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c77
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c11
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.h7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c46
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c47
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Kconfig4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h305
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c102
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h24
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c856
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c96
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c248
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c193
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c)483
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c835
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h372
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h515
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h219
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h347
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h858
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h283
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h312
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h450
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h578
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h298
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h247
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/system.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api/soc.h)16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c1046
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c218
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h254
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c375
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c743
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h267
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c843
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h379
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h579
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c445
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c764
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c398
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c802
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h)25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h103
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c841
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h542
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c2216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c412
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h784
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace-data.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.h76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2065
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c703
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c909
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c336
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2702
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c776
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h615
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c719
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c521
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c503
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c739
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2054
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c269
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c1095
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c447
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c160
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c1138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c921
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c4381
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c335
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c1283
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c1226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1260
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c147
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c213
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c279
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c326
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c483
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c352
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c580
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c1774
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c1129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c1206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c539
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c305
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c916
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c603
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c338
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c618
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2387
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h (renamed from drivers/net/wireless/intel/iwlwifi/pcie/internal.h)564
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/rx.c)411
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c)327
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c)2053
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c1434
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx.c)1405
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)145
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c259
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1834
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c316
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c109
-rw-r--r--drivers/net/wireless/intersil/Kconfig2
-rw-r--r--drivers/net/wireless/intersil/Makefile2
-rw-r--r--drivers/net/wireless/intersil/hostap/Kconfig95
-rw-r--r--drivers/net/wireless/intersil/hostap/Makefile8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap.h99
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211.h97
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_rx.c1116
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_tx.c554
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c3277
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.h264
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_common.h420
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_config.h49
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_cs.c710
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c811
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c3386
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_info.c509
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c4053
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c1126
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_pci.c445
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_plx.c617
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c411
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h1049
-rw-r--r--drivers/net/wireless/intersil/orinoco/Kconfig143
-rw-r--r--drivers/net/wireless/intersil/orinoco/Makefile15
-rw-r--r--drivers/net/wireless/intersil/orinoco/airport.c267
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.c291
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.h15
-rw-r--r--drivers/net/wireless/intersil/orinoco/fw.c387
-rw-r--r--drivers/net/wireless/intersil/orinoco/fw.h21
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c777
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.h534
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_dld.c477
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_dld.h52
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_rid.h165
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c1360
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.h60
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2414
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.h50
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c89
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h23
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h251
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_cs.c341
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_nortel.c314
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_pci.c257
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_pci.h54
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_plx.c362
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_tmd.c237
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c1787
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.c259
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.h21
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c319
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.c1413
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.h13
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.h4
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c16
-rw-r--r--drivers/net/wireless/intersil/p54/main.c27
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h3
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c27
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c21
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig10
-rw-r--r--drivers/net/wireless/marvell/libertas/Makefile1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c107
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c156
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h15
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c7
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h16
-rw-r--r--drivers/net/wireless/marvell/libertas/if_cs.c957
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c76
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c31
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c9
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c111
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c79
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/tx.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h21
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c8
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h39
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c67
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Makefile13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c600
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c139
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c55
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h127
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c126
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c98
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c194
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h122
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c380
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c63
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c147
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c220
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c44
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c79
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c85
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c40
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c61
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c226
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c30
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c61
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c66
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c154
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h29
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig17
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile19
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c785
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h164
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c207
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c869
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h1042
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c95
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c131
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c540
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c216
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c573
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h143
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h73
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h295
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h (renamed from drivers/net/wireless/mediatek/mt76/mt7921/mac.h)464
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h400
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c1098
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c1643
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h1089
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c84
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c411
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c1134
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c694
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c354
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c816
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c2109
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h366
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c977
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c3599
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1040
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c987
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h393
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c279
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h1060
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1312
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c294
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c446
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c370
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c1422
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1569
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c1723
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h264
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h480
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c479
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c281
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h404
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c159
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c348
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c319
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c281
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c1497
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c2229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c3903
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h714
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h380
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c639
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c151
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h92
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c346
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h537
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c486
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c986
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c372
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c384
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h487
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h (renamed from drivers/net/wireless/mediatek/mt76/mt7921/mt7921_trace.h)18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c317
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c1056
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c1005
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c405
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c1745
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c3321
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c2340
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c4889
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h974
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c892
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h902
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c269
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h791
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c334
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c209
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c14
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c491
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h34
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c450
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h64
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c228
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h36
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c346
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c348
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c549
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h71
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h22
-rw-r--r--drivers/net/wireless/purelifi/Kconfig17
-rw-r--r--drivers/net/wireless/purelifi/Makefile2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Kconfig14
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Makefile3
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.c98
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.h70
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/firmware.c276
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/intf.h52
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c759
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h184
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c896
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.h198
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c46
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c30
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c21
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c14
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2188
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h26
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c121
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h45
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c44
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00link.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c30
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c153
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c15
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c14
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c14
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/ray_cs.c2817
-rw-r--r--drivers/net/wireless/ray_cs.h74
-rw-r--r--drivers/net/wireless/rayctl.h734
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c33
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c46
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c14
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c1885
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c1765
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c)243
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c)301
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192f.c2091
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8710b.c1875
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c)310
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c)165
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig14
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c)3006
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h)188
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h690
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c100
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c122
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c216
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c55
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c89
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c253
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c56
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c147
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h)0
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c1061
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c370
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c1225
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c856
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h)162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c359
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c516
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c1086
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h91
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c377
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c1190
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c957
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c377
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c532
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h434
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c1212
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c3118
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c240
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c394
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c1675
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c372
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c53
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c56
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c169
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c288
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h171
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig180
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile66
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c486
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h30
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c563
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h23
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c823
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h221
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c74
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.h25
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c211
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c262
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c880
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h388
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c245
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c428
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h23
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c62
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h324
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c26
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c2003
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.h102
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c902
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c721
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h251
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c788
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h524
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1125
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.c2812
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a_table.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c94
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2270
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1226
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.c2350
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c78
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c277
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h63
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c1156
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c60
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c160
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h35
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c104
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c222
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h47
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c1239
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c1989
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.h175
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c140
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h75
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c114
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c1437
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.h178
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c227
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h139
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c1381
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h103
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c107
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h11
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig137
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile85
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1281
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h298
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c795
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h465
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c3448
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h223
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c9342
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h236
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c5488
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h5066
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c3794
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h31
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c342
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c514
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c9481
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h5933
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c5182
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1002
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1684
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c2657
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c2968
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h1367
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c708
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c6529
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h837
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c1021
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c485
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h7421
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c1398
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2721
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.h76
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c3784
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c499
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c14916
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c102
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c66
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c902
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c499
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c2744
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c13719
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c104
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c1033
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2106
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h400
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c4212
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c794
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h62
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c22927
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c106
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c890
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c4279
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c108
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c3251
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h103
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c4491
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h37
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c781
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c57159
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c3009
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h74
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c119
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c817
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c383
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h754
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1071
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c1820
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h147
-rw-r--r--drivers/net/wireless/rndis_wlan.c3761
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c14
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c29
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c85
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c18
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c81
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c17
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c43
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h2
-rw-r--r--drivers/net/wireless/silabs/Kconfig18
-rw-r--r--drivers/net/wireless/silabs/Makefile3
-rw-r--r--drivers/net/wireless/silabs/wfx/Kconfig13
-rw-r--r--drivers/net/wireless/silabs/wfx/Makefile25
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.c324
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.h34
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h37
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c326
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c321
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.c93
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.c594
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.h53
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.c331
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.c388
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.h15
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_cmd.h553
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_general.h252
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_mib.h346
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.c391
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c537
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.h62
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.c307
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.h48
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.c332
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.h78
-rw-r--r--drivers/net/wireless/silabs/wfx/key.c227
-rw-r--r--drivers/net/wireless/silabs/wfx/key.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c525
-rw-r--r--drivers/net/wireless/silabs/wfx/main.h41
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.c322
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.h45
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.c209
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.h28
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c841
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h73
-rw-r--r--drivers/net/wireless/silabs/wfx/traces.h496
-rw-r--r--drivers/net/wireless/silabs/wfx/wfx.h169
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c21
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c42
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c83
-rw-r--r--drivers/net/wireless/st/cw1200/main.c7
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c50
-rw-r--r--drivers/net/wireless/st/cw1200/queue.h1
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c73
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h12
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c16
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c4
-rw-r--r--drivers/net/wireless/ti/Kconfig8
-rw-r--r--drivers/net/wireless/ti/Makefile3
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c22
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c37
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c33
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c81
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c25
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c8
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c21
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c10
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c79
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c65
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c63
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c507
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c43
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c14
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c21
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h10
-rw-r--r--drivers/net/wireless/virtual/Kconfig20
-rw-r--r--drivers/net/wireless/virtual/Makefile3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c (renamed from drivers/net/wireless/mac80211_hwsim.c)3016
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h (renamed from drivers/net/wireless/mac80211_hwsim.h)101
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c (renamed from drivers/net/wireless/virt_wifi.c)38
-rw-r--r--drivers/net/wireless/wl3501.h615
-rw-r--r--drivers/net/wireless/wl3501_cs.c2030
-rw-r--r--drivers/net/wireless/zydas/Kconfig19
-rw-r--r--drivers/net/wireless/zydas/Makefile2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1906
-rw-r--r--drivers/net/wireless/zydas/zd1201.h144
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c30
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c22
-rw-r--r--drivers/net/wwan/Kconfig42
-rw-r--r--drivers/net/wwan/Makefile2
-rw-r--r--drivers/net/wwan/iosm/Makefile4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c30
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.h17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c9
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c145
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h32
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c44
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h19
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c50
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h133
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c769
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h142
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c127
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pm.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c181
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.h74
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c51
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.h10
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c46
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c908
-rw-r--r--drivers/net/wwan/rpmsg_wwan_ctrl.c5
-rw-r--r--drivers/net/wwan/t7xx/Makefile21
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.h180
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.c1281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.h179
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c1362
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h133
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c583
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h202
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c1168
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h117
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c682
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h78
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.c122
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.h38
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c817
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h96
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c529
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h60
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c961
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h140
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c262
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.h31
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h153
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c277
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c655
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h113
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c117
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c247
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h372
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c648
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h139
-rw-r--r--drivers/net/wwan/wwan_core.c214
-rw-r--r--drivers/net/wwan/wwan_hwsim.c42
-rw-r--r--drivers/net/xen-netback/common.h24
-rw-r--r--drivers/net/xen-netback/hash.c5
-rw-r--r--drivers/net/xen-netback/interface.c46
-rw-r--r--drivers/net/xen-netback/netback.c403
-rw-r--r--drivers/net/xen-netback/rx.c84
-rw-r--r--drivers/net/xen-netback/xenbus.c22
-rw-r--r--drivers/net/xen-netfront.c426
5469 files changed, 1458710 insertions, 419557 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd335ae1122b..ac12eaf11755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -78,23 +78,9 @@ config WIREGUARD
depends on IPV6 || !IPV6
select NET_UDP_TUNNEL
select DST_CACHE
- select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
- select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
- select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
- select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
- select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
- select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
- select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
- select CRYPTO_POLY1305_ARM if ARM
- select CRYPTO_BLAKE2S_ARM if ARM
- select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
- select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
- select CRYPTO_POLY1305_MIPS if MIPS
+ select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -116,6 +102,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernel space.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
@@ -291,6 +292,36 @@ config GTP
 To compile this driver as a module, choose M here: the module
will be called gtp.
+config PFCP
+ tristate "Packet Forwarding Control Protocol (PFCP)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create PFCP virtual interfaces that allow one to
+ set up software and hardware offload of PFCP packets.
+ Note that this module does not implement the PFCP protocol in the
+ kernel; there is no support for parsing any PFCP messages.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pfcp.
+
+config AMT
+ tristate "Automatic Multicast Tunneling (AMT)"
+ depends on INET && IP_MULTICAST
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create AMT (Automatic Multicast Tunneling)
+ virtual interfaces that provide multicast tunneling.
+ There are two roles: Gateway and Relay.
+ The Gateway encapsulates IGMP/MLD traffic from listeners and sends
+ it to the Relay, and decapsulates multicast traffic coming back from
+ the Relay toward the listeners. The Relay encapsulates multicast
+ traffic from sources toward the Gateway, and decapsulates IGMP/MLD
+ traffic arriving from the Gateway.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amt.
+
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
select CRYPTO
@@ -316,9 +347,30 @@ config NETCONSOLE_DYNAMIC
at runtime through a userspace interface exported using configfs.
See <file:Documentation/networking/netconsole.rst> for details.
+config NETCONSOLE_EXTENDED_LOG
+ bool "Set kernel extended message by default"
+ depends on NETCONSOLE
+ default n
+ help
+ Enable extended log support for netconsole messages. If this option
+ is set, log messages are transmitted with an extended metadata header
+ in a format similar to /dev/kmsg. See
+ <file:Documentation/networking/netconsole.rst> for details.
+
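For reference, an extended netconsole record follows the /dev/kmsg layout of "<level>,<sequence>,<timestamp-usec>,<flags>;<message>"; with illustrative values, one such line might read "6,412,5549520,-;eth0: link up".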
+config NETCONSOLE_PREPEND_RELEASE
+ bool "Prepend kernel release version in the message by default"
+ depends on NETCONSOLE_EXTENDED_LOG
+ default n
+ help
+ Prepend the kernel release version to each netconsole message by
+ default. If this option is set, the kernel release is prepended to
+ the first field of every netconsole message, so the netconsole
+ server/peer can easily identify which kernel release is logging each
+ message. See <file:Documentation/networking/netconsole.rst> for
+ details.
+
config NETPOLL
def_bool NETCONSOLE
- select SRCU
config NET_POLL_CONTROLLER
def_bool NETPOLL
@@ -387,6 +439,7 @@ config TUN_VNET_CROSS_LE
config VETH
tristate "Virtual ethernet pair device"
+ select PAGE_POOL
help
This device is a local ethernet tunnel. Devices are created in pairs.
When one end receives the packet it appears on its pair and vice
@@ -396,6 +449,7 @@ config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
select NET_FAILOVER
+ select DIMLIB
help
This is the virtual network driver for virtio. It can be used with
 QEMU-based VMMs (like KVM or Xen). Say Y or M.
@@ -410,6 +464,15 @@ config NLMON
diagnostics, etc. This is mostly intended for developers or support
to debug netlink issues. If unsure, say N.
+config NETKIT
+ bool "BPF-programmable network device"
+ depends on BPF_SYSCALL
+ help
+ The netkit device is a virtual networking device where BPF programs
+ can be attached to the transmission routine of the device (or of its
+ peer) in order to implement the driver's internal logic. The device
+ can be configured to operate in L3 or L2 mode. If unsure, say N.
+
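As an illustration of the mechanism described above, a minimal pass-through program could look like the sketch below. It assumes libbpf's "netkit/primary" section convention and a TC-style verdict in which returning 0 lets the packet through; it is illustrative only and not part of this patch.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Runs for every packet entering the transmission routine of the
 * primary device of a netkit pair. */
SEC("netkit/primary")
int netkit_pass_all(struct __sk_buff *skb)
{
	return 0; /* let the packet through unchanged */
}

char _license[] SEC("license") = "GPL";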
config NET_VRF
tristate "Virtual Routing and Forwarding (Lite)"
depends on IP_MULTIPLE_TABLES
@@ -457,31 +520,11 @@ source "drivers/net/hippi/Kconfig"
source "drivers/net/ipa/Kconfig"
-config NET_SB1000
- tristate "General Instruments Surfboard 1000"
- depends on PNP
- help
- This is a driver for the General Instrument (also known as
- NextLevel) SURFboard 1000 internal
- cable modem. This is an ISA card which is used by a number of cable
- TV companies to provide cable modem access. It's a one-way
- downstream-only cable modem, meaning that your upstream net link is
- provided by your regular phone modem.
-
- At present this driver only compiles as a module, so say M here if
- you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
- information on how to use this module, as it needs special ppp
- scripts for establishing a connection. Further documentation
- and the necessary scripts can be found at:
-
- <http://www.jacksonville.net/~fventuri/>
- <http://home.adelphia.net/~siglercm/sb1000.html>
- <http://linuxpower.cx/~cable/>
+source "drivers/net/phy/Kconfig"
- If you don't have this card, of course say N.
+source "drivers/net/pse-pd/Kconfig"
-source "drivers/net/phy/Kconfig"
+source "drivers/net/can/Kconfig"
source "drivers/net/mctp/Kconfig"
@@ -550,9 +593,8 @@ config XEN_NETDEV_BACKEND
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
- depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
- IA64_PAGE_SIZE_64KB || PARISC_PAGE_SIZE_64KB || \
- PPC_64K_PAGES)
+ depends on PAGE_SIZE_LESS_THAN_64KB
+ select PAGE_POOL
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
@@ -565,18 +607,7 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config USB4_NET
- tristate "Networking over USB4 and Thunderbolt cables"
- depends on USB4 && INET
- help
- Select this if you want to create network between two computers
- over a USB4 and Thunderbolt cables. The driver supports Apple
- ThunderboltIP protocol and allows communication with any host
- supporting the same protocol including Windows and macOS.
-
- To compile this driver a module, choose M here. The module will be
- called thunderbolt-net.
-
+source "drivers/net/thunderbolt/Kconfig"
source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
@@ -585,7 +616,10 @@ config NETDEVSIM
depends on INET
depends on IPV6 || IPV6=n
depends on PSAMPLE || PSAMPLE=n
+ depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n
select NET_DEVLINK
+ select PAGE_POOL
+ select NET_SHAPER
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 739838623cf6..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,9 +11,11 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
+obj-$(CONFIG_AMT) += amt.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_MII) += mii.o
@@ -21,7 +23,9 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
+obj-$(CONFIG_NETKIT) += netkit.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
@@ -30,11 +34,12 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_PFCP) += pfcp.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
@@ -43,10 +48,11 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
# Networking Drivers
#
obj-$(CONFIG_ARCNET) += arcnet/
-obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_NET_DSA) += dsa/
+ifdef CONFIG_NET_DSA
+obj-y += dsa/
+endif
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -64,7 +70,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/
obj-$(CONFIG_PPTP) += ppp/
obj-$(CONFIG_SLIP) += slip/
obj-$(CONFIG_SLHC) += slip/
-obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
@@ -82,8 +87,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
-
-thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt/
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 49e67c9fb5a4..c01e2c2f7d6c 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -67,8 +67,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
- memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy_pad(s[i].name, name);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
@@ -222,9 +221,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_CS89x0_ISA
{cs89x0_probe, 0},
#endif
-#ifdef CONFIG_NI65
- {ni65_probe, 0},
-#endif
{NULL, 0},
};
@@ -250,12 +246,6 @@ static int __init net_olddevs_init(void)
for (num = 0; num < 8; ++num)
ethif_probe2(num);
-#ifdef CONFIG_COPS
- cops_probe(0);
- cops_probe(1);
- cops_probe(2);
-#endif
-
return 0;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
new file mode 100644
index 000000000000..902c817a0dea
--- /dev/null
+++ b/drivers/net/amt.c
@@ -0,0 +1,3452 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/igmp.h>
+#include <linux/workqueue.h>
+#include <net/flow.h>
+#include <net/pkt_sched.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/mld.h>
+#include <net/amt.h>
+#include <uapi/linux/amt.h>
+#include <linux/security.h>
+#include <net/gro_cells.h>
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/ndisc.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/inet_dscp.h>
+#include <net/ip6_checksum.h>
+
+static struct workqueue_struct *amt_wq;
+
+static HLIST_HEAD(source_gc_list);
+/* Lock for source_gc_list */
+static spinlock_t source_gc_lock;
+static struct delayed_work source_gc_wq;
+static char *status_str[] = {
+ "AMT_STATUS_INIT",
+ "AMT_STATUS_SENT_DISCOVERY",
+ "AMT_STATUS_RECEIVED_DISCOVERY",
+ "AMT_STATUS_SENT_ADVERTISEMENT",
+ "AMT_STATUS_RECEIVED_ADVERTISEMENT",
+ "AMT_STATUS_SENT_REQUEST",
+ "AMT_STATUS_RECEIVED_REQUEST",
+ "AMT_STATUS_SENT_QUERY",
+ "AMT_STATUS_RECEIVED_QUERY",
+ "AMT_STATUS_SENT_UPDATE",
+ "AMT_STATUS_RECEIVED_UPDATE",
+};
+
+static char *type_str[] = {
+ "", /* Type 0 is not defined */
+ "AMT_MSG_DISCOVERY",
+ "AMT_MSG_ADVERTISEMENT",
+ "AMT_MSG_REQUEST",
+ "AMT_MSG_MEMBERSHIP_QUERY",
+ "AMT_MSG_MEMBERSHIP_UPDATE",
+ "AMT_MSG_MULTICAST_DATA",
+ "AMT_MSG_TEARDOWN",
+};
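+
+/* Message directions per RFC 7450, summarized here for reference:
+ * Discovery, Request, Membership Update and Teardown are sent by the
+ * Gateway to the Relay; Advertisement, Membership Query and Multicast
+ * Data are sent by the Relay to the Gateway.
+ */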
+
+static char *action_str[] = {
+ "AMT_ACT_GMI",
+ "AMT_ACT_GMI_ZERO",
+ "AMT_ACT_GT",
+ "AMT_ACT_STATUS_FWD_NEW",
+ "AMT_ACT_STATUS_D_FWD_NEW",
+ "AMT_ACT_STATUS_NONE_NEW",
+};
+
+static struct igmpv3_grec igmpv3_zero_grec;
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
+static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
+static struct mld2_grec mldv2_zero_grec;
+#endif
+
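+/* amt's private control block is placed in skb->cb behind tc's, so the
+ * two can coexist on one skb; the BUILD_BUG_ON below ensures both still
+ * fit inside the cb area.
+ */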
+static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+
+ return (struct amt_skb_cb *)((void *)skb->cb +
+ sizeof(struct tc_skb_cb));
+}
+
+static void __amt_source_gc_work(void)
+{
+ struct amt_source_node *snode;
+ struct hlist_head gc_list;
+ struct hlist_node *t;
+
+ spin_lock_bh(&source_gc_lock);
+ hlist_move_list(&source_gc_list, &gc_list);
+ spin_unlock_bh(&source_gc_lock);
+
+ hlist_for_each_entry_safe(snode, t, &gc_list, node) {
+ hlist_del_rcu(&snode->node);
+ kfree_rcu(snode, rcu);
+ }
+}
+
+static void amt_source_gc_work(struct work_struct *work)
+{
+ __amt_source_gc_work();
+
+ spin_lock_bh(&source_gc_lock);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
+{
+ return !memcmp(a, b, sizeof(union amt_addr));
+}
+
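+/* Sources and groups are hashed with jhash keyed by a per-device random
+ * seed; reciprocal_scale() then maps the hash uniformly onto the
+ * configured number of buckets.
+ */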
+static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
+{
+ u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
+
+static bool amt_status_filter(struct amt_source_node *snode,
+ enum amt_filter filter)
+{
+ bool rc = false;
+
+ switch (filter) {
+ case AMT_FILTER_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_ALL:
+ rc = true;
+ break;
+ case AMT_FILTER_NONE_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_NONE &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH_NEW:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return rc;
+}
+
+static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ enum amt_filter filter,
+ union amt_addr *src)
+{
+ u32 hash = amt_source_hash(tunnel, src);
+ struct amt_source_node *snode;
+
+ hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
+ if (amt_status_filter(snode, filter) &&
+ amt_addr_equal(&snode->source_addr, src))
+ return snode;
+
+ return NULL;
+}
+
+static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
+{
+ u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
+
+static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ u32 hash = amt_group_hash(tunnel, group);
+ struct amt_group_node *gnode;
+
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
+ if (amt_addr_equal(&gnode->group_addr, group) &&
+ amt_addr_equal(&gnode->host_addr, host) &&
+ gnode->v6 == v6)
+ return gnode;
+ }
+
+ return NULL;
+}
+
+static void amt_destroy_source(struct amt_source_node *snode)
+{
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+
+ if (!gnode->v6) {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+
+ cancel_delayed_work(&snode->source_timer);
+ hlist_del_init_rcu(&snode->node);
+ tunnel->nr_sources--;
+ gnode->nr_sources--;
+ spin_lock_bh(&source_gc_lock);
+ hlist_add_head_rcu(&snode->node, &source_gc_list);
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ if (cancel_delayed_work(&gnode->group_timer))
+ dev_put(amt->dev);
+ hlist_del_rcu(&gnode->node);
+ gnode->tunnel_list->nr_groups--;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Leave group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Leave group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
+ amt_destroy_source(snode);
+
+ /* tunnel->lock was acquired outside of amt_del_group(),
+ * but rcu_read_lock() was also acquired, so it's safe.
+ */
+ kfree_rcu(gnode, rcu);
+}
+
+/* If a source timer expires while the router filter-mode for the group
+ * is INCLUDE, the router concludes that traffic from this particular
+ * source is no longer desired on the attached network, and deletes the
+ * associated source record.
+ */
+static void amt_source_work(struct work_struct *work)
+{
+ struct amt_source_node *snode = container_of(to_delayed_work(work),
+ struct amt_source_node,
+ source_timer);
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ amt_destroy_source(snode);
+ if (!gnode->nr_sources)
+ amt_del_group(amt, gnode);
+ } else {
+ /* When a router filter-mode for a group is EXCLUDE,
+ * source records are only deleted when the group timer expires
+ */
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ }
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
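+/* Applies a single state-machine action to a source: AMT_ACT_GMI
+ * (re)arms the source timer to the Group Membership Interval, AMT_ACT_GT
+ * arms it to the group timer's expiry, AMT_ACT_GMI_ZERO cancels it, and
+ * the AMT_ACT_STATUS_* actions rewrite the forwarding status and flags.
+ */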
+static void amt_act_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ struct amt_source_node *snode,
+ enum amt_act act)
+{
+ struct amt_dev *amt = tunnel->amt;
+
+ switch (act) {
+ case AMT_ACT_GMI:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ msecs_to_jiffies(amt_gmi(amt)));
+ break;
+ case AMT_ACT_GMI_ZERO:
+ cancel_delayed_work(&snode->source_timer);
+ break;
+ case AMT_ACT_GT:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ gnode->group_timer.timer.expires);
+ break;
+ case AMT_ACT_STATUS_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_D_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_NONE_NEW:
+ cancel_delayed_work(&snode->source_timer);
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4,
+ action_str[act]);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6,
+ action_str[act]);
+#endif
+}
+
+static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
+ union amt_addr *src)
+{
+ struct amt_source_node *snode;
+
+ snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
+ if (!snode)
+ return NULL;
+
+ memcpy(&snode->source_addr, src, sizeof(union amt_addr));
+ snode->gnode = gnode;
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ INIT_HLIST_NODE(&snode->node);
+ INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
+
+ return snode;
+}
+
+/* RFC 3810 - 7.2.2. Definition of Filter Timers
+ *
+ * Router Mode Filter Timer Actions/Comments
+ * ----------- ----------------- ----------------
+ *
+ * INCLUDE Not Used All listeners in
+ * INCLUDE mode.
+ *
+ * EXCLUDE Timer > 0 At least one listener
+ * in EXCLUDE mode.
+ *
+ * EXCLUDE Timer == 0 No more listeners in
+ * EXCLUDE mode for the
+ * multicast address.
+ * If the Requested List
+ * is empty, delete
+ * Multicast Address
+ * Record. If not, switch
+ * to INCLUDE filter mode;
+ * the sources in the
+ * Requested List are
+ * moved to the Include
+ * List, and the Exclude
+ * List is deleted.
+ */
+static void amt_group_work(struct work_struct *work)
+{
+ struct amt_group_node *gnode = container_of(to_delayed_work(work),
+ struct amt_group_node,
+ group_timer);
+ struct amt_tunnel_list *tunnel = gnode->tunnel_list;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_source_node *snode;
+ bool delete_group = true;
+ struct hlist_node *t;
+ int i, buckets;
+
+ buckets = amt->hash_buckets;
+
+ spin_lock_bh(&tunnel->lock);
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ /* Not Used */
+ spin_unlock_bh(&tunnel->lock);
+ goto out;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < buckets; i++) {
+ hlist_for_each_entry_safe(snode, t,
+ &gnode->sources[i], node) {
+ if (!delayed_work_pending(&snode->source_timer) ||
+ snode->status == AMT_SOURCE_STATUS_D_FWD) {
+ amt_destroy_source(snode);
+ } else {
+ delete_group = false;
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ }
+ }
+ }
+ if (delete_group)
+ amt_del_group(amt, gnode);
+ else
+ gnode->filter_mode = MCAST_INCLUDE;
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+out:
+ dev_put(amt->dev);
+}
+
+/* Non-existent group is created as INCLUDE {empty}:
+ *
+ * RFC 3376 - 5.1. Action on Change of Interface State
+ *
+ * If no interface state existed for that multicast address before
+ * the change (i.e., the change consisted of creating a new
+ * per-interface record), or if no state exists after the change
+ * (i.e., the change consisted of deleting a per-interface record),
+ * then the "non-existent" state is considered to have a filter mode
+ * of INCLUDE and an empty source list.
+ */
+static struct amt_group_node *amt_add_group(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ struct amt_group_node *gnode;
+ u32 hash;
+ int i;
+
+ if (tunnel->nr_groups >= amt->max_groups)
+ return ERR_PTR(-ENOSPC);
+
+ gnode = kzalloc(sizeof(*gnode) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (unlikely(!gnode))
+ return ERR_PTR(-ENOMEM);
+
+ gnode->amt = amt;
+ gnode->group_addr = *group;
+ gnode->host_addr = *host;
+ gnode->v6 = v6;
+ gnode->tunnel_list = tunnel;
+ gnode->filter_mode = MCAST_INCLUDE;
+ INIT_HLIST_NODE(&gnode->node);
+ INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&gnode->sources[i]);
+
+ hash = amt_group_hash(tunnel, group);
+ hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
+ tunnel->nr_groups++;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Join group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Join group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+
+ return gnode;
+}
+
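+/* Builds an IGMPv3 general membership query: an Ethernet frame carrying
+ * an IPv4 header with the 4-byte Router Alert option and TTL 1, destined
+ * to 224.0.0.1 (all-hosts), followed by the igmpv3_query itself with
+ * QQIC taken from the device's query interval.
+ */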
+static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct igmpv3_query *ihv3;
+ void *csum_start = NULL;
+ __sum16 *csum = NULL;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ unsigned int len;
+ int offset;
+
+ len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb_put(skb, sizeof(*iph));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put(skb, sizeof(*ihv3));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
+ iph->tos = AMT_TOS;
+ iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = 1;
+ iph->id = 0;
+ iph->protocol = IPPROTO_IGMP;
+ iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+ iph->saddr = htonl(INADDR_ANY);
+ ip_send_check(iph);
+
+ eth = eth_hdr(skb);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
+ eth->h_proto = htons(ETH_P_IP);
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
+ ihv3->code = 1;
+ ihv3->group = 0;
+ ihv3->qqic = amt->qi;
+ ihv3->nsrcs = 0;
+ ihv3->resv = 0;
+ ihv3->suppress = false;
+ ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
+ ihv3->csum = 0;
+ csum = &ihv3->csum;
+ csum_start = (void *)ihv3;
+ *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
+
+ return skb;
+}
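+
+/* Layout of the query built above, derived from the struct sizes used
+ * in this file (illustrative, not measured on the wire):
+ *
+ *   iph->ihl     = (20 + 4) >> 2 = 6   IPv4 header + 4-byte RA option
+ *   iph->tot_len = 20 + 4 + 12         plus the IGMPv3 query itself
+ *
+ * ihv3->csum covers only the query (ip_compute_csum() over
+ * sizeof(*ihv3)); the IPv4 header checksum is filled in separately by
+ * ip_send_check().
+ */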
+
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
+{
+ if (validate && amt->status >= status)
+ return;
+ netdev_dbg(amt->dev, "Update GW status %s -> %s",
+ status_str[amt->status], status_str[status]);
+ WRITE_ONCE(amt->status, status);
+}
+
+static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status,
+ bool validate)
+{
+ if (validate && tunnel->status >= status)
+ return;
+ netdev_dbg(tunnel->amt->dev,
+ "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
+ &tunnel->ip4, ntohs(tunnel->source_port),
+ status_str[tunnel->status], status_str[status]);
+ tunnel->status = status;
+}
+
+static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status, bool validate)
+{
+ spin_lock_bh(&tunnel->lock);
+ __amt_update_relay_status(tunnel, status, validate);
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_send_discovery(struct amt_dev *amt)
+{
+ struct amt_header_discovery *amtd;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->discovery_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtd->version = 0;
+ amtd->type = AMT_MSG_DISCOVERY;
+ amtd->reserved = 0;
+ amtd->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*udph) + sizeof(*amtd));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
+ sizeof(*udph) + sizeof(*amtd),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->discovery_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+ amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
+out:
+ rcu_read_unlock();
+}
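+
+/* The UDP checksum above is a two-step computation: skb_checksum()
+ * sums the UDP header plus AMT payload while udph->check is still
+ * zero, then csum_tcpudp_magic() folds in the IPv4 pseudo-header
+ * (saddr, daddr, protocol, UDP length).  amt_send_request() and
+ * amt_send_advertisement() below follow the same pattern.
+ */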
+
+static void amt_send_request(struct amt_dev *amt, bool v6)
+{
+ struct amt_header_request *amtrh;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->remote_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtrh->version = 0;
+ amtrh->type = AMT_MSG_REQUEST;
+ amtrh->reserved1 = 0;
+ amtrh->p = v6;
+ amtrh->reserved2 = 0;
+ amtrh->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
+ sizeof(*udph) + sizeof(*amtrh),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->remote_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
+static void amt_send_igmp_gq(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_igmp_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
+ 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct mld2_query *mld2q;
+ void *csum_start = NULL;
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ u32 len;
+
+ len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->protocol = htons(ETH_P_IPV6);
+ skb_put_zero(skb, sizeof(*ip6h));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put_zero(skb, sizeof(*mld2q));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
+ ip6h->nexthdr = NEXTHDR_HOP;
+ ip6h->hop_limit = 1;
+ ip6h->daddr = mld2_all_node;
+ ip6_flow_hdr(ip6h, 0, 0);
+
+ if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
+ &ip6h->saddr)) {
+ amt->dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ eth->h_proto = htons(ETH_P_IPV6);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
+
+ skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
+ skb_reset_transport_header(skb);
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ mld2q->mld2q_mrc = htons(1);
+ mld2q->mld2q_type = ICMPV6_MGM_QUERY;
+ mld2q->mld2q_code = 0;
+ mld2q->mld2q_cksum = 0;
+ mld2q->mld2q_resv1 = 0;
+ mld2q->mld2q_resv2 = 0;
+ mld2q->mld2q_suppress = 0;
+ mld2q->mld2q_qrv = amt->qrv;
+ mld2q->mld2q_nsrcs = 0;
+ mld2q->mld2q_qqic = amt->qi;
+ csum_start = (void *)mld2q;
+ mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ sizeof(*mld2q),
+ IPPROTO_ICMPV6,
+ csum_partial(csum_start,
+ sizeof(*mld2q), 0));
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
+ return skb;
+}
+
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_mld_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+#else
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+}
+#endif
+
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+ struct sk_buff *skb)
+{
+ int index;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events >= AMT_MAX_EVENTS) {
+ spin_unlock_bh(&amt->lock);
+ return true;
+ }
+
+ index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+ amt->events[index].event = event;
+ amt->events[index].skb = skb;
+ amt->nr_events++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ queue_work(amt_wq, &amt->event_wq);
+ spin_unlock_bh(&amt->lock);
+
+ return false;
+}
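+
+/* The events array is a ring buffer: event_idx is the consumer index
+ * and nr_events the fill level.  For example, with AMT_MAX_EVENTS == 16,
+ * event_idx == 14 and nr_events == 3, the next event is queued at slot
+ * (14 + 3) % 16 == 1 and amt_event_work() drains slots 14, 15, 0, 1 in
+ * FIFO order.
+ */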
+
+static void amt_secret_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ secret_wq);
+
+ spin_lock_bh(&amt->lock);
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+ spin_unlock_bh(&amt->lock);
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+}
+
+static void amt_event_send_discovery(struct amt_dev *amt)
+{
+ if (amt->status > AMT_STATUS_SENT_DISCOVERY)
+ goto out;
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+
+ amt_send_discovery(amt);
+out:
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_discovery_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ discovery_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
+ u32 exp;
+
+ if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ goto out;
+
+ if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
+ netdev_dbg(amt->dev, "Gateway is not ready");
+ amt->qi = AMT_INIT_REQ_TIMEOUT;
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ amt->remote_ip = 0;
+ amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt->req_cnt = 0;
+ amt->nonce = 0;
+ goto out;
+ }
+
+ if (!amt->req_cnt) {
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+ }
+
+ amt_send_request(amt, false);
+ amt_send_request(amt, true);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+ amt->req_cnt++;
+out:
+ exp = min_t(u32, (1 << amt->req_cnt), AMT_MAX_REQ_TIMEOUT);
+ mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
+}
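+
+/* Request retransmission backs off exponentially: req_cnt values of
+ * 0, 1, 2, 3, ... yield delays of 1, 2, 4, 8, ... seconds, clamped to
+ * AMT_MAX_REQ_TIMEOUT.  Once req_cnt exceeds AMT_MAX_REQ_COUNT, the
+ * gateway is reset to AMT_STATUS_INIT above and the cycle restarts
+ * with a fresh nonce.
+ */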
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+ mod_delayed_work(amt_wq, &amt->req_wq,
+ msecs_to_jiffies(100));
+}
+
+static bool amt_send_membership_update(struct amt_dev *amt,
+ struct sk_buff *skb,
+ bool v6)
+{
+ struct amt_header_membership_update *amtmu;
+ struct socket *sock;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
+ sizeof(*iph) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = amt->remote_ip;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
+ return true;
+ }
+
+ amtmu = skb_push(skb, sizeof(*amtmu));
+ amtmu->version = 0;
+ amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
+ amtmu->reserved = 0;
+ amtmu->nonce = amt->nonce;
+ amtmu->response_mac = amt->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->gw_port,
+ amt->relay_port,
+ false,
+ false,
+ 0);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
+ return false;
+}
+
+static void amt_send_multicast_data(struct amt_dev *amt,
+ const struct sk_buff *oskb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_mcast_data *amtmd;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return;
+
+ skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
+ sizeof(struct udphdr), 0, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ kfree_skb(skb);
+ return;
+ }
+
+ amtmd = skb_push(skb, sizeof(*amtmd));
+ amtmd->version = 0;
+ amtmd->reserved = 0;
+ amtmd->type = AMT_MSG_MULTICAST_DATA;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false,
+ 0);
+}
+
+static bool amt_send_membership_query(struct amt_dev *amt,
+ struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_membership_query *amtmq;
+ struct socket *sock;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
+ sizeof(struct iphdr) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ return true;
+ }
+
+ amtmq = skb_push(skb, sizeof(*amtmq));
+ amtmq->version = 0;
+ amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
+ amtmq->reserved = 0;
+ amtmq->l = 0;
+ amtmq->g = 0;
+ amtmq->nonce = tunnel->nonce;
+ amtmq->response_mac = tunnel->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false,
+ 0);
+ amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
+ return false;
+}
+
+static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel;
+ struct amt_group_node *gnode;
+ union amt_addr group = {0,};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h;
+ struct mld_msg *mld;
+#endif
+ bool report = false;
+ struct igmphdr *ih;
+ bool query = false;
+ struct iphdr *iph;
+ bool data = false;
+ bool v6 = false;
+ u32 hash;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ goto free;
+
+ if (!ip_mc_check_igmp(skb)) {
+ ih = igmp_hdr(skb);
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ report = true;
+ break;
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = false;
+ group.ip4 = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ goto free;
+
+ if (!ipv6_mc_check_mld(skb)) {
+ mld = (struct mld_msg *)skb_transport_header(skb);
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MLD2_REPORT:
+ report = true;
+ break;
+ case ICMPV6_MGM_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = true;
+ group.ip6 = ip6h->daddr;
+#endif
+ } else {
+ dev->stats.tx_errors++;
+ goto free;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
+ goto free;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ /* Gateway only passes IGMP/MLD packets */
+ if (!report)
+ goto free;
+ if ((!v6 && !READ_ONCE(amt->ready4)) ||
+ (v6 && !READ_ONCE(amt->ready6)))
+ goto free;
+ if (amt_send_membership_update(amt, skb, v6))
+ goto free;
+ goto out;
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ if (query) {
+ tunnel = amt_skb_cb(skb)->tunnel;
+ if (!tunnel) {
+ WARN_ON(1);
+ goto free;
+ }
+
+ /* Do not forward unexpected query */
+ if (amt_send_membership_query(amt, skb, tunnel, v6))
+ goto free;
+ goto out;
+ }
+
+ if (!data)
+ goto free;
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ hash = amt_group_hash(tunnel, &group);
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
+ node) {
+ if (!v6) {
+ if (gnode->group_addr.ip4 == iph->daddr)
+ goto found;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (ipv6_addr_equal(&gnode->group_addr.ip6,
+ &ip6h->daddr))
+ goto found;
+#endif
+ }
+ }
+ continue;
+found:
+ amt_send_multicast_data(amt, skb, tunnel, v6);
+ }
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+free:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+out:
+ return NETDEV_TX_OK;
+}
+
+static int amt_parse_type(struct sk_buff *skb)
+{
+ struct amt_header *amth;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr) +
+ sizeof(struct amt_header)))
+ return -1;
+
+ amth = (struct amt_header *)(udp_hdr(skb) + 1);
+
+ if (amth->version != 0)
+ return -1;
+
+ if (amth->type >= __AMT_MSG_MAX || !amth->type)
+ return -1;
+ return amth->type;
+}
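+
+/* Example: a well-formed relay discovery packet carries version 0 and
+ * type AMT_MSG_DISCOVERY right after the UDP header, so this returns
+ * AMT_MSG_DISCOVERY; a nonzero version or an out-of-range type yields
+ * -1 and the caller drops the packet.
+ */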
+
+static void amt_clear_groups(struct amt_tunnel_list *tunnel)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_group_node *gnode;
+ struct hlist_node *t;
+ int i;
+
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
+ amt_del_group(amt, gnode);
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_tunnel_expire(struct work_struct *work)
+{
+ struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
+ struct amt_tunnel_list,
+ gc_wq);
+ struct amt_dev *amt = tunnel->amt;
+
+ spin_lock_bh(&amt->lock);
+ rcu_read_lock();
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ amt_clear_groups(tunnel);
+ rcu_read_unlock();
+ spin_unlock_bh(&amt->lock);
+ kfree_rcu(tunnel, rcu);
+}
+
+static void amt_cleanup_srcs(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ /* Delete old sources */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
+ if (snode->flags == AMT_SOURCE_OLD)
+ amt_destroy_source(snode);
+ }
+ }
+
+ /* switch from new to old */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
+ snode->flags = AMT_SOURCE_OLD;
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
+static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode, void *grec,
+ bool v6)
+{
+ struct igmpv3_grec *igmp_grec;
+ struct amt_source_node *snode;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ u16 nsrcs;
+ u32 hash;
+ int i;
+
+ if (!v6) {
+ igmp_grec = grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (tunnel->nr_sources >= amt->max_sources)
+ return;
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
+ continue;
+
+ snode = amt_alloc_snode(gnode, &src);
+ if (snode) {
+ hash = amt_source_hash(tunnel, &snode->source_addr);
+ hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
+ tunnel->nr_sources++;
+ gnode->nr_sources++;
+
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
+/* Router State Report Rec'd New Router State
+ * ------------ ------------ ----------------
+ * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
+ *
+ * -----------+-----------+-----------+
+ * | OLD | NEW |
+ * -----------+-----------+-----------+
+ * FWD | X | X+A |
+ * -----------+-----------+-----------+
+ * D_FWD | Y | Y-A |
+ * -----------+-----------+-----------+
+ * NONE | | A |
+ * -----------+-----------+-----------+
+ *
+ * a) Received sources are NONE/NEW
+ * b) All NONE will be deleted by amt_cleanup_srcs().
+ * c) All OLD will be deleted by amt_cleanup_srcs().
+ * d) After delete, NEW source will be switched to OLD.
+ */
+static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec,
+ enum amt_ops ops,
+ enum amt_filter filter,
+ enum amt_act act,
+ bool v6)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_source_node *snode;
+ struct igmpv3_grec *igmp_grec;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ struct hlist_node *t;
+ u16 nsrcs;
+ int i, j;
+
+ if (!v6) {
+ igmp_grec = grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+
+ memset(&src, 0, sizeof(union amt_addr));
+ switch (ops) {
+ case AMT_OPS_INT:
+ /* A*B */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_UNI:
+ /* A+B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (amt_status_filter(snode, filter))
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_SUB:
+ /* A-B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (!amt_status_filter(snode, filter))
+ continue;
+ for (j = 0; j < nsrcs; j++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[j];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6,
+ &mld_grec->grec_src[j],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_addr_equal(&snode->source_addr,
+ &src))
+ goto out_sub;
+ }
+ amt_act_src(tunnel, gnode, snode, act);
+ continue;
+out_sub:;
+ }
+ }
+ break;
+ case AMT_OPS_SUB_REV:
+ /* B-A */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
+ &src);
+ if (!snode) {
+ snode = amt_lookup_src(tunnel, gnode,
+ filter, &src);
+ if (snode)
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type\n");
+ return;
+ }
+}
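+
+/* Worked example of the ops above (source addresses illustrative):
+ * with existing sources A = {s1, s2} and received record B = {s2, s3},
+ *
+ *   AMT_OPS_INT     acts on A*B = {s2}
+ *   AMT_OPS_UNI     acts on A+B, restricted to entries that already
+ *                   exist and match the filter
+ *   AMT_OPS_SUB     acts on A-B = {s1}
+ *   AMT_OPS_SUB_REV acts on B-A = {s3}, i.e. report sources with no
+ *                   pre-existing entry under AMT_FILTER_ALL (matched
+ *                   via the given filter)
+ */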
+
+static void amt_mcast_is_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
+ */
+ /* Update IS_IN (B) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ */
+ /* Update (A) in (X, Y) as NONE/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_BOTH,
+ AMT_ACT_STATUS_NONE_NEW,
+ v6);
+ /* Update FWD/OLD as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update IS_IN (A) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
+
+static void amt_mcast_is_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE(A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE(, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A-X-Y)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH_NEW,
+ AMT_ACT_GMI,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_to_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
+ * Send Q(G,A-B)
+ */
+ /* Update TO_IN (B) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) sources as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ * Send Q(G,X-A)
+ * Send Q(G)
+ */
+ /* Update TO_IN (A) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE(X,) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A)
+ * (A) are already switched to FWD_NEW.
+ * So, D_FWD/OLD -> D_FWD/NEW is okay.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * Only FWD_NEW will have (A) sources.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_to_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Send Q(G,A*B)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE (A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Send Q(G,A-Y)
+ * Group Timer=GMI
+ */
+ /* Update (A-X-Y) as NONE/OLD */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_allow_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
+ */
+ /* INCLUDE (A+B) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ */
+ /* EXCLUDE (X+A, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * All (A) source are now FWD/NEW status.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_block_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
+ */
+ /* INCLUDE (A) */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
+ * Send Q(G,A-Y)
+ */
+ /* (A-X-Y)=Group Timer */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (X, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (X+(A-Y), ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, false);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
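+
+/* Example of the translation above: an IGMPv2 report for a group with
+ * no prior state becomes IS_EX( {} ), i.e. a fresh gnode in EXCLUDE
+ * mode with an empty source list and its group timer armed to GMI, so
+ * traffic from every source of that group is forwarded until the
+ * timer expires.
+ */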
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
+static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*ihrv3);
+ void *zero_grec = (void *)&igmpv3_zero_grec;
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct igmpv3_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(__be32);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ false);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, false);
+ switch (grec->grec_type) {
+ case IGMPV3_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* caller must hold tunnel->lock */
+static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv3_report_handler(amt, skb, tunnel);
+ break;
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv2_report_handler(amt, skb, tunnel);
+ break;
+ case IGMP_HOST_LEAVE_MESSAGE:
+ amt_igmpv2_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message MLDv2 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Done TO_IN( {} )
+ */
+static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, true);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
+
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message MLDv2 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Done TO_IN( {} )
+ */
+static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
+static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*mld2r);
+ void *zero_grec = (void *)&mldv2_zero_grec;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct mld2_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(struct in6_addr);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip6 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip6 = ip6h->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ true);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, true);
+ switch (grec->grec_type) {
+ case MLD2_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* caller must hold tunnel->lock */
+static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ amt_mldv1_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MLD2_REPORT:
+ amt_mldv2_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ amt_mldv1_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_advertisement *amta;
+ int hdr_size;
+
+ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
+ if (!amta->ip4)
+ return true;
+
+ if (amta->reserved || amta->version)
+ return true;
+
+ if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
+ ipv4_is_zeronet(amta->ip4))
+ return true;
+
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+ amt->nonce != amta->nonce)
+ return true;
+
+ amt->remote_ip = amta->ip4;
+ netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
+ return false;
+}
+
+static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_mcast_data *amtmd;
+ int hdr_size, len, err;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
+ if (amtmd->reserved || amtmd->version)
+ return true;
+
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
+ return true;
+
+ skb_reset_network_header(skb);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = eth_hdr(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+ iph = ip_hdr(skb);
+
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return true;
+
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ err = gro_cells_receive(&amt->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(amt->dev, len);
+ else
+ amt->dev->stats.rx_dropped++;
+
+ return false;
+}
+
+static bool amt_membership_query_handler(struct amt_dev *amt,
+ struct sk_buff *skb)
+{
+ struct amt_header_membership_query *amtmq;
+ struct igmpv3_query *ihv3;
+ struct ethhdr *eth, *oeth;
+ struct iphdr *iph;
+ int hdr_size, len;
+
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
+ if (amtmq->reserved || amtmq->version)
+ return true;
+
+ if (amtmq->nonce != amt->nonce)
+ return true;
+
+ hdr_size -= sizeof(*eth);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
+ return true;
+
+ oeth = eth_hdr(skb);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ eth = eth_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (READ_ONCE(amt->ready4))
+ return true;
+
+ if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
+ sizeof(*ihv3)))
+ return true;
+
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ WRITE_ONCE(amt->ready4, true);
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = ihv3->qqic;
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct mld2_query *mld2q;
+ struct ipv6hdr *ip6h;
+
+ if (READ_ONCE(amt->ready6))
+ return true;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
+ sizeof(*mld2q)))
+ return true;
+
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+
+ mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ WRITE_ONCE(amt->ready6, true);
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = mld2q->mld2q_qqic;
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ ether_addr_copy(eth->h_source, oeth->h_source);
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ local_bh_disable();
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+ local_bh_enable();
+
+ return false;
+}
+
+static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_membership_update *amtmu;
+ struct amt_tunnel_list *tunnel;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ int len, hdr_size;
+
+ iph = ip_hdr(skb);
+
+ hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
+ if (amtmu->reserved || amtmu->version)
+ return true;
+
+ if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
+ return true;
+
+ skb_reset_network_header(skb);
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ if (tunnel->ip4 == iph->saddr) {
+ if (amtmu->nonce == tunnel->nonce &&
+ amtmu->response_mac == tunnel->mac) {
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt))
+ * 3);
+ goto report;
+ } else {
+ netdev_dbg(amt->dev, "Invalid MAC\n");
+ return true;
+ }
+ }
+ }
+
+ return true;
+
+report:
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (ip_mc_check_igmp(skb)) {
+ netdev_dbg(amt->dev, "Invalid IGMP\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_igmp_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ if (ipv6_mc_check_mld(skb)) {
+ netdev_dbg(amt->dev, "Invalid MLD\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_mld_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ netdev_dbg(amt->dev, "Unsupported Protocol\n");
+ return true;
+ }
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
+ true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+
+ return false;
+}
+
+static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
+ __be32 daddr, __be16 dport)
+{
+ struct amt_header_advertisement *amta;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ daddr, amt->local_ip,
+ dport, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amta->version = 0;
+ amta->type = AMT_MSG_ADVERTISEMENT;
+ amta->reserved = 0;
+ amta->nonce = nonce;
+ amta->ip4 = amt->local_ip;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->relay_port;
+ udph->dest = dport;
+ udph->len = htons(sizeof(*amta) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
+ sizeof(*udph) + sizeof(*amta),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = daddr;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
+static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_discovery *amtd;
+ struct udphdr *udph;
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
+
+ if (amtd->reserved || amtd->version)
+ return true;
+
+ amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
+
+ return false;
+}
+
+static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_request *amtrh;
+ struct amt_tunnel_list *tunnel;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ u64 mac;
+ int i;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
+
+ if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
+ return true;
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
+ if (tunnel->ip4 == iph->saddr)
+ goto send;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_tunnels >= amt->max_tunnels) {
+ spin_unlock_bh(&amt->lock);
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ return true;
+ }
+
+ tunnel = kzalloc(sizeof(*tunnel) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (!tunnel) {
+ spin_unlock_bh(&amt->lock);
+ return true;
+ }
+
+ tunnel->source_port = udph->source;
+ tunnel->ip4 = iph->saddr;
+
+ tunnel->amt = amt;
+ spin_lock_init(&tunnel->lock);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&tunnel->groups[i]);
+
+ INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
+
+ list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
+ tunnel->key = amt->key;
+ __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ amt->nr_tunnels++;
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt)));
+ spin_unlock_bh(&amt->lock);
+
+send:
+ tunnel->nonce = amtrh->nonce;
+ mac = siphash_3u32((__force u32)tunnel->ip4,
+ (__force u32)tunnel->source_port,
+ (__force u32)tunnel->nonce,
+ &tunnel->key);
+ tunnel->mac = mac >> 16;
+
+ if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
+ return true;
+
+ if (!amtrh->p)
+ amt_send_igmp_gq(amt, tunnel);
+ else
+ amt_send_mld_gq(amt, tunnel);
+
+ return false;
+}
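+
+/* Response MAC sketch: the relay derives a 48-bit MAC from the 64-bit
+ * siphash over (source IP, source port, nonce), keyed with the secret
+ * that amt_secret_work() regenerates periodically:
+ *
+ *   mac = siphash_3u32(ip4, source_port, nonce, &key);
+ *   tunnel->mac = mac >> 16;
+ *
+ * The gateway must echo this value in its membership update;
+ * amt_update_handler() verifies it against tunnel->mac.
+ */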
+
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+ int type = amt_parse_type(skb);
+ bool err = true;
+
+ if (type == -1)
+ goto drop;
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ err = amt_membership_query_handler(amt, skb);
+ if (!err)
+ return;
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+}
+
+static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ struct iphdr *iph;
+ int type;
+ bool err;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt) {
+ err = true;
+ kfree_skb(skb);
+ goto out;
+ }
+
+ skb->dev = amt->dev;
+ iph = ip_hdr(skb);
+ type = amt_parse_type(skb);
+ if (type == -1) {
+ err = true;
+ goto drop;
+ }
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ if (iph->saddr != amt->discovery_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
+ goto out;
+ case AMT_MSG_MULTICAST_DATA:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_multicast_data_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ } else {
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ err = amt_discovery_handler(amt, skb);
+ break;
+ case AMT_MSG_REQUEST:
+ err = amt_request_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ err = amt_update_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of relay\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+out:
+ rcu_read_unlock_bh();
+ return 0;
+}
+
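+/* Drain the per-device event ring: dequeue up to AMT_MAX_EVENTS entries
+ * under amt->lock and run the matching handler for each of them with the
+ * lock dropped.
+ */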
+static void amt_event_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+ struct sk_buff *skb;
+ u8 event;
+ int i;
+
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events == 0) {
+ spin_unlock_bh(&amt->lock);
+ return;
+ }
+ event = amt->events[amt->event_idx].event;
+ skb = amt->events[amt->event_idx].skb;
+ amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+ amt->events[amt->event_idx].skb = NULL;
+ amt->nr_events--;
+ amt->event_idx++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ spin_unlock_bh(&amt->lock);
+
+ switch (event) {
+ case AMT_EVENT_RECEIVE:
+ amt_gw_rcv(amt, skb);
+ break;
+ case AMT_EVENT_SEND_DISCOVERY:
+ amt_event_send_discovery(amt);
+ break;
+ case AMT_EVENT_SEND_REQUEST:
+ amt_event_send_request(amt);
+ break;
+ default:
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
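+/* encap_err_lookup() callback, invoked when an ICMP error arrives for the
+ * tunnel socket. On a gateway, an unreachable Request or Membership
+ * Update reschedules the request work immediately so the handshake with
+ * the relay is retried.
+ */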
+static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ int type;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt)
+ goto out;
+
+ if (amt->mode != AMT_MODE_GATEWAY)
+ goto drop;
+
+ type = amt_parse_type(skb);
+ if (type == -1)
+ goto drop;
+
+ netdev_dbg(amt->dev, "Received ICMP Unreachable of %s\n",
+ type_str[type]);
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ break;
+ case AMT_MSG_REQUEST:
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ break;
+ default:
+ goto drop;
+ }
+out:
+ rcu_read_unlock_bh();
+ return 0;
+drop:
+ rcu_read_unlock_bh();
+ amt->dev->stats.rx_dropped++;
+ return 0;
+}
+
+static struct socket *amt_create_sock(struct net *net, __be16 port)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ udp_conf.local_udp_port = port;
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
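+/* Create the kernel UDP socket bound to the configured relay port and
+ * register amt_rcv()/amt_err_lookup() as its encapsulation callbacks via
+ * setup_udp_tunnel_sock().
+ */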
+static int amt_socket_create(struct amt_dev *amt)
+{
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ struct socket *sock;
+
+ sock = amt_create_sock(amt->net, amt->relay_port);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ /* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
+ tunnel_cfg.sk_user_data = amt;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = amt_rcv;
+ tunnel_cfg.encap_err_lookup = amt_err_lookup;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
+
+ rcu_assign_pointer(amt->sock, sock);
+ return 0;
+}
+
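+/* ndo_open: create the tunnel socket, reset the handshake state, generate
+ * a fresh siphash key, then kick the discovery/request work in gateway
+ * mode or arm the secret refresh work in relay mode.
+ */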
+static int amt_dev_open(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->event_idx = 0;
+ amt->nr_events = 0;
+
+ err = amt_socket_create(amt);
+ if (err)
+ return err;
+
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+ amt->nonce = 0;
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+
+ amt->status = AMT_STATUS_INIT;
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+ }
+ return err;
+}
+
+static int amt_dev_stop(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel, *tmp;
+ struct socket *sock;
+ struct sk_buff *skb;
+ int i;
+
+ cancel_delayed_work_sync(&amt->req_wq);
+ cancel_delayed_work_sync(&amt->discovery_wq);
+ cancel_delayed_work_sync(&amt->secret_wq);
+
+ /* shutdown */
+ sock = rtnl_dereference(amt->sock);
+ RCU_INIT_POINTER(amt->sock, NULL);
+ synchronize_net();
+ if (sock)
+ udp_tunnel_sock_release(sock);
+
+ cancel_work_sync(&amt->event_wq);
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ skb = amt->events[i].skb;
+ kfree_skb(skb);
+ amt->events[i].event = AMT_EVENT_NONE;
+ amt->events[i].skb = NULL;
+ }
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+
+ list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ cancel_delayed_work_sync(&tunnel->gc_wq);
+ amt_clear_groups(tunnel);
+ kfree_rcu(tunnel, rcu);
+ }
+
+ return 0;
+}
+
+static const struct device_type amt_type = {
+ .name = "amt",
+};
+
+static int amt_dev_init(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->dev = dev;
+
+ err = gro_cells_init(&amt->gro_cells, dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void amt_dev_uninit(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ gro_cells_destroy(&amt->gro_cells);
+}
+
+static const struct net_device_ops amt_netdev_ops = {
+ .ndo_init = amt_dev_init,
+ .ndo_uninit = amt_dev_uninit,
+ .ndo_open = amt_dev_open,
+ .ndo_stop = amt_dev_stop,
+ .ndo_start_xmit = amt_dev_xmit,
+};
+
+static void amt_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &amt_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &amt_type);
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
+ dev->netns_immutable = true;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ eth_hw_addr_random(dev);
+ eth_zero_addr(dev->broadcast);
+ ether_setup(dev);
+}
+
+static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
+ [IFLA_AMT_MODE] = { .type = NLA_U32 },
+ [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_LINK] = { .type = NLA_U32 },
+ [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
+};
+
+static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!data[IFLA_AMT_LINK]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
+ "Link attribute is required");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_MODE]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is required");
+ return -EINVAL;
+ }
+
+ if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is not valid");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_LOCAL_IP]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
+ "Local attribute is required");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_DISCOVERY_IP] &&
+ nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
+ "Discovery attribute is required");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
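+/* Parse the netlink configuration and register the device. MTU and
+ * needed headroom are derived from the underlying stream device minus the
+ * AMT encapsulation overhead (AMT_RELAY_HLEN or AMT_GW_HLEN depending on
+ * the mode), and the amt device is linked as an upper device of the
+ * stream device.
+ */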
+static int amt_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct amt_dev *amt = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
+ int err = -EINVAL;
+
+ amt->net = link_net;
+ amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
+
+ if (data[IFLA_AMT_MAX_TUNNELS] &&
+ nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
+ amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
+ else
+ amt->max_tunnels = AMT_MAX_TUNNELS;
+
+ spin_lock_init(&amt->lock);
+ amt->max_groups = AMT_MAX_GROUP;
+ amt->max_sources = AMT_MAX_SOURCE;
+ amt->hash_buckets = AMT_HSIZE;
+ amt->nr_tunnels = 0;
+ get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
+ amt->stream_dev = dev_get_by_index(link_net,
+ nla_get_u32(data[IFLA_AMT_LINK]));
+ if (!amt->stream_dev) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Can't find stream device");
+ return -ENODEV;
+ }
+
+ if (amt->stream_dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Invalid stream device type");
+ goto err;
+ }
+
+ amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
+ if (ipv4_is_loopback(amt->local_ip) ||
+ ipv4_is_zeronet(amt->local_ip) ||
+ ipv4_is_multicast(amt->local_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
+ "Invalid Local address");
+ goto err;
+ }
+
+ amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
+
+ amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
+ htons(IANA_AMT_UDP_PORT));
+
+ if (!amt->relay_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_RELAY_PORT],
+ "relay port must not be 0");
+ goto err;
+ }
+ if (amt->mode == AMT_MODE_RELAY) {
+ amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
+ amt->qri = 10;
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_RELAY_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
+ } else {
+ if (!data[IFLA_AMT_DISCOVERY_IP]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be set in gateway mode");
+ goto err;
+ }
+ if (!amt->gw_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_GATEWAY_PORT],
+ "gateway port must not be 0");
+ goto err;
+ }
+ amt->remote_ip = 0;
+ amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
+ if (ipv4_is_loopback(amt->discovery_ip) ||
+ ipv4_is_zeronet(amt->discovery_ip) ||
+ ipv4_is_multicast(amt->discovery_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be unicast");
+ goto err;
+ }
+
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_GW_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
+ }
+ amt->qi = AMT_INIT_QUERY_INTERVAL;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+ goto err;
+ }
+
+ err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
+ if (err < 0) {
+ unregister_netdevice(dev);
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
+ INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
+ INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_WORK(&amt->event_wq, amt_event_work);
+ INIT_LIST_HEAD(&amt->tunnel_list);
+ return 0;
+err:
+ dev_put(amt->stream_dev);
+ return err;
+}
+
+static void amt_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(amt->stream_dev, dev);
+ dev_put(amt->stream_dev);
+}
+
+static size_t amt_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
+ nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
+ nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
+ nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
+}
+
+static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
+ goto nla_put_failure;
+ if (amt->remote_ip)
+ if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops amt_link_ops __read_mostly = {
+ .kind = "amt",
+ .maxtype = IFLA_AMT_MAX,
+ .policy = amt_policy,
+ .priv_size = sizeof(struct amt_dev),
+ .setup = amt_link_setup,
+ .validate = amt_validate,
+ .newlink = amt_newlink,
+ .dellink = amt_dellink,
+ .get_size = amt_get_size,
+ .fill_info = amt_fill_info,
+};
+
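+/* Example configuration with iproute2 (illustrative only: this assumes an
+ * iproute2 build with AMT support, and uses documentation addresses):
+ *
+ *   # gateway mode, discovering a relay at 198.51.100.1
+ *   ip link add amtg type amt mode gateway local 192.0.2.10 \
+ *           discovery 198.51.100.1 dev eth0
+ *
+ *   # relay mode on the same underlay device
+ *   ip link add amtr type amt mode relay local 198.51.100.1 dev eth0
+ */
+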
+static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
+{
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+
+ for_each_netdev(dev_net(dev), upper_dev) {
+ if (netif_is_amt(upper_dev)) {
+ amt = netdev_priv(upper_dev);
+ if (amt->stream_dev == dev)
+ return upper_dev;
+ }
+ }
+
+ return NULL;
+}
+
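+/* Netdevice notifier: follow the underlying stream device. Unregistering
+ * it tears down the amt device as well, and MTU changes propagate minus
+ * the per-mode encapsulation overhead.
+ */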
+static int amt_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+ LIST_HEAD(list);
+ int new_mtu;
+
+ upper_dev = amt_lookup_upper_dev(dev);
+ if (!upper_dev)
+ return NOTIFY_DONE;
+ amt = netdev_priv(upper_dev);
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ amt_dellink(amt->dev, &list);
+ unregister_netdevice_many(&list);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (amt->mode == AMT_MODE_RELAY)
+ new_mtu = dev->mtu - AMT_RELAY_HLEN;
+ else
+ new_mtu = dev->mtu - AMT_GW_HLEN;
+
+ dev_set_mtu(amt->dev, new_mtu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block amt_notifier_block __read_mostly = {
+ .notifier_call = amt_device_event,
+};
+
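+/* Module init: register the netdevice notifier and rtnl ops, create the
+ * unbound workqueue shared by all amt devices, and start the periodic
+ * source GC work.
+ */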
+static int __init amt_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&amt_notifier_block);
+ if (err < 0)
+ goto err;
+
+ err = rtnl_link_register(&amt_link_ops);
+ if (err < 0)
+ goto unregister_notifier;
+
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
+ if (!amt_wq) {
+ err = -ENOMEM;
+ goto rtnl_unregister;
+ }
+
+ spin_lock_init(&source_gc_lock);
+ spin_lock_bh(&source_gc_lock);
+ INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+
+ return 0;
+
+rtnl_unregister:
+ rtnl_link_unregister(&amt_link_ops);
+unregister_notifier:
+ unregister_netdevice_notifier(&amt_notifier_block);
+err:
+ pr_err("error loading AMT module loaded\n");
+ return err;
+}
+late_initcall(amt_init);
+
+static void __exit amt_fini(void)
+{
+ rtnl_link_unregister(&amt_link_ops);
+ unregister_netdevice_notifier(&amt_notifier_block);
+ cancel_delayed_work_sync(&source_gc_wq);
+ __amt_source_gc_work();
+ destroy_workqueue(amt_wq);
+}
+module_exit(amt_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
+MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
+MODULE_ALIAS_RTNL_LINK("amt");
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
deleted file mode 100644
index 90b9f1d6eda9..000000000000
--- a/drivers/net/appletalk/Kconfig
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Appletalk driver configuration
-#
-config ATALK
- tristate "Appletalk protocol support"
- select LLC
- help
- AppleTalk is the protocol that Apple computers can use to communicate
- on a network. If your Linux box is connected to such a network and you
- wish to connect to it, say Y. You will need to use the netatalk package
- so that your Linux box can act as a print and file server for Macs as
- well as access AppleTalk printers. Check out
- <http://www.zettabyte.net/netatalk/> on the WWW for details.
- EtherTalk is the name used for AppleTalk over Ethernet and the
- cheaper and slower LocalTalk is AppleTalk over a proprietary Apple
- network using serial links. EtherTalk and LocalTalk are fully
- supported by Linux.
-
- General information about how to connect Linux, Windows machines and
- Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>. The
- NET3-4-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, contains valuable
- information as well.
-
- To compile this driver as a module, choose M here: the module will be
- called appletalk. You almost certainly want to compile it as a
- module so you can restart your AppleTalk stack without rebooting
- your machine. I hear that the GNU boycott of Apple is over, so
- even politically correct people are allowed to say Y here.
-
-config DEV_APPLETALK
- tristate "Appletalk interfaces support"
- depends on ATALK
- help
- AppleTalk is the protocol that Apple computers can use to communicate
- on a network. If your Linux box is connected to such a network and you
- wish to do IP over it, or you have a LocalTalk card and wish to use it to
- connect to the AppleTalk network, say Y.
-
-
-config LTPC
- tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
- help
- This allows you to use the AppleTalk PC card to connect to LocalTalk
- networks. The card is also known as the Farallon PhoneNet PC card.
- If you are in doubt, this card is the one with the 65C02 chip on it.
- You also need version 1.3.3 or later of the netatalk package.
- This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/device_drivers/appletalk/ltpc.rst>.
-
-config COPS
- tristate "COPS LocalTalk PC support"
- depends on DEV_APPLETALK && ISA
- depends on NETDEVICES
- select NETDEV_LEGACY_INIT
- help
- This allows you to use COPS AppleTalk cards to connect to LocalTalk
- networks. You also need version 1.3.3 or later of the netatalk
- package. This driver is experimental, which means that it may not
- work. This driver will only work if you choose "AppleTalk DDP"
- networking support, above.
- Please read the file
- <file:Documentation/networking/device_drivers/appletalk/cops.rst>.
-
-config COPS_DAYNA
- bool "Dayna firmware support"
- depends on COPS
- help
- Support COPS compatible cards with Dayna style firmware (Dayna
- DL2000/DaynaTalk PC (half length), COPS LT-95, Farallon PhoneNET PC
- III, Farallon PhoneNET PC II).
-
-config COPS_TANGENT
- bool "Tangent firmware support"
- depends on COPS
- help
- Support COPS compatible cards with Tangent style firmware (Tangent
- ATB_II, Novell NL-1000, Daystar Digital LT-200).
-
-config IPDDP
- tristate "Appletalk-IP driver support"
- depends on DEV_APPLETALK && ATALK
- help
- This allows IP networking for users who only have AppleTalk
- networking available. This feature is experimental. With this
- driver, you can encapsulate IP inside AppleTalk (e.g. if your Linux
- box is stuck on an AppleTalk only network) or decapsulate (e.g. if
- you want your Linux box to act as an Internet gateway for a zoo of
- AppleTalk connected Macs). Please see the file
- <file:Documentation/networking/ipddp.rst> for more information.
-
- If you say Y here, the AppleTalk-IP support will be compiled into
- the kernel. In this case, you can either use encapsulation or
- decapsulation, but not both. With the following two questions, you
- decide which one you want.
-
- To compile the AppleTalk-IP support as a module, choose M here: the
- module will be called ipddp.
- In this case, you will be able to use both encapsulation and
- decapsulation simultaneously, by loading two copies of the module
- and specifying different values for the module option ipddp_mode.
-
-config IPDDP_ENCAP
- bool "IP to Appletalk-IP Encapsulation support"
- depends on IPDDP
- help
- If you say Y here, the AppleTalk-IP code will be able to encapsulate
- IP packets inside AppleTalk frames; this is useful if your Linux box
- is stuck on an AppleTalk network (which hopefully contains a
- decapsulator somewhere). Please see
- <file:Documentation/networking/ipddp.rst> for more information.
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
deleted file mode 100644
index 903da3303f41..000000000000
--- a/drivers/net/appletalk/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for drivers/net/appletalk
-#
-
-obj-$(CONFIG_IPDDP) += ipddp.o
-obj-$(CONFIG_COPS) += cops.o
-obj-$(CONFIG_LTPC) += ltpc.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
deleted file mode 100644
index 97f254bdbb16..000000000000
--- a/drivers/net/appletalk/cops.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/* cops.c: LocalTalk driver for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- *
- * With more than a little help from;
- * - Alan Cox <alan@lxorguk.ukuu.org.uk>
- *
- * Derived from:
- * - skeleton.c: A network driver outline for linux.
- * Written 1993-94 by Donald Becker.
- * - ltpc.c: A driver for the LocalTalk PC card.
- * Written by Bradford W. Johnson.
- *
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * Changes:
- * 19970608 Alan Cox Allowed dual card type support
- * Can set board type in insmod
- * Hooks for cops_setup routine
- * (not yet implemented).
- * 19971101 Jay Schulist Fixes for multiple lt* devices.
- * 19980607 Steven Hirsch Fixed the badly broken support
- * for Tangent type cards. Only
- * tested on Daystar LT200. Some
- * cleanup of formatting and program
- * logic. Added emacs 'local-vars'
- * setup for Jay's brace style.
- * 20000211 Alan Cox Cleaned up for softnet
- */
-
-static const char *version =
-"cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n";
-/*
- * Sources:
- * COPS Localtalk SDK. This provides almost all of the information
- * needed.
- */
-
-/*
- * insmod/modprobe configurable stuff.
- * - IO Port, choose one your card supports or 0 if you dare.
- * - IRQ, also choose one your card supports or nothing and let
- * the driver figure it out.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-#include <linux/delay.h> /* For udelay() */
-#include <linux/atalk.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <net/Space.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "cops.h" /* Our Stuff */
-#include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */
-#include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */
-
-/*
- * The name of the card. Is used for messages and in the requests for
- * io regions, irqs and dma channels
- */
-
-static const char *cardname = "cops";
-
-#ifdef CONFIG_COPS_DAYNA
-static int board_type = DAYNA; /* Module exported */
-#else
-static int board_type = TANGENT;
-#endif
-
-static int io = 0x240; /* Default IO for Dayna */
-static int irq = 5; /* Default IRQ */
-
-/*
- * COPS Autoprobe information.
- * Right now if port address is right but IRQ is not 5 this will
- * return a 5 no matter what since we will still get a status response.
- * One more check is needed to narrow this down once we have the
- * ioaddr, but since the only other possible IRQs are 3 and 4 there is
- * no real hurry on this. I *STRONGLY* recommend using IRQ 5 for your
- * card with this driver.
- *
- * This driver has two modes: Dayna mode and Tangent mode. Each mode
- * corresponds to a type of card. It has been found that there are two
- * main types of card and all others are the same, just with different
- * names or minor differences such as extra IO ports. As this driver is
- * tested it will become clearer exactly which cards are supported. The
- * driver defaults to Dayna mode. To change the driver's mode, simply
- * select Dayna or Tangent mode when configuring the kernel.
- *
- * This driver should support:
- * TANGENT driver mode:
- * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200,
- * COPS LT-1
- * DAYNA driver mode:
- * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95,
- * Farallon PhoneNET PC III, Farallon PhoneNET PC II
- * Other cards possibly supported, though the mode is unknown:
- * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel)
- *
- * Cards NOT supported by this driver but supported by the ltpc.c
- * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- * Farallon PhoneNET PC
- * Original Apple LocalTalk PC card
- *
- * N.B.
- *
- * The Daystar Digital LT200 boards do not support interrupt-driven
- * IO. You must specify 'irq=0xff' as a module parameter to invoke
- * polled mode. I also believe that the port probing logic is quite
- * dangerous at best and certainly hopeless for a polled card. Best to
- * specify both. - Steve H.
- *
- */
-
-/*
- * Zero terminated list of IO ports to probe.
- */
-
-static unsigned int ports[] = {
- 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260,
- 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360,
- 0
-};
-
-/*
- * Zero terminated list of IRQs to probe.
- */
-
-static int cops_irqlist[] = {
- 5, 4, 3, 0
-};
-
-static struct timer_list cops_timer;
-static struct net_device *cops_timer_dev;
-
-/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */
-#ifndef COPS_DEBUG
-#define COPS_DEBUG 1
-#endif
-static unsigned int cops_debug = COPS_DEBUG;
-
-/* The number of low I/O ports used by the card. */
-#define COPS_IO_EXTENT 8
-
-/* Information that needs to be kept for each board. */
-
-struct cops_local
-{
- int board; /* Holds what board type is. */
- int nodeid; /* Set to 1 once have nodeid. */
- unsigned char node_acquire; /* Node ID when acquired. */
- struct atalk_addr node_addr; /* Full node address */
- spinlock_t lock; /* RX/TX lock */
-};
-
-/* Index to functions, as function prototypes. */
-static int cops_probe1 (struct net_device *dev, int ioaddr);
-static int cops_irq (int ioaddr, int board);
-
-static int cops_open (struct net_device *dev);
-static int cops_jumpstart (struct net_device *dev);
-static void cops_reset (struct net_device *dev, int sleep);
-static void cops_load (struct net_device *dev);
-static int cops_nodeid (struct net_device *dev, int nodeid);
-
-static irqreturn_t cops_interrupt (int irq, void *dev_id);
-static void cops_poll(struct timer_list *t);
-static void cops_timeout(struct net_device *dev, unsigned int txqueue);
-static void cops_rx (struct net_device *dev);
-static netdev_tx_t cops_send_packet (struct sk_buff *skb,
- struct net_device *dev);
-static void set_multicast_list (struct net_device *dev);
-static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static int cops_close (struct net_device *dev);
-
-static void cleanup_card(struct net_device *dev)
-{
- if (dev->irq)
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, COPS_IO_EXTENT);
-}
-
-/*
- * Check for a network adaptor of this type, and return '0' iff one exists.
- * If dev->base_addr == 0, probe all likely locations.
- * If dev->base_addr in [1..0x1ff], always return failure.
- * otherwise go with what we pass in.
- */
-struct net_device * __init cops_probe(int unit)
-{
- struct net_device *dev;
- unsigned *port;
- int base_addr;
- int err = 0;
-
- dev = alloc_ltalkdev(sizeof(struct cops_local));
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "lt%d", unit);
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- base_addr = dev->base_addr;
- } else {
- base_addr = dev->base_addr = io;
- }
-
- if (base_addr > 0x1ff) { /* Check a single specified location. */
- err = cops_probe1(dev, base_addr);
- } else if (base_addr != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- /* FIXME Does this really work for cards which generate irq?
- * It's definitely N.G. for polled Tangent. sh
- * Dayna cards don't autoprobe well at all, but if your card is
- * at IRQ 5 & IO 0x240 we find it every time. ;) JS
- */
- for (port = ports; *port && cops_probe1(dev, *port) < 0; port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops cops_netdev_ops = {
- .ndo_open = cops_open,
- .ndo_stop = cops_close,
- .ndo_start_xmit = cops_send_packet,
- .ndo_tx_timeout = cops_timeout,
- .ndo_do_ioctl = cops_ioctl,
- .ndo_set_rx_mode = set_multicast_list,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-static int __init cops_probe1(struct net_device *dev, int ioaddr)
-{
- struct cops_local *lp;
- static unsigned version_printed;
- int board = board_type;
- int retval;
-
- if(cops_debug && version_printed++ == 0)
- printk("%s", version);
-
- /* Grab the region so no one else tries to probe our ioports. */
- if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name))
- return -EBUSY;
-
- /*
- * Since this board has jumpered interrupts, allocate the interrupt
- * vector now. There is no point in waiting since no other device
- * can use the interrupt, and this marks the irq as busy. Jumpered
- * interrupts are typically not reported by the boards, and we must
- * use AutoIRQ to find them.
- */
- dev->irq = irq;
- switch (dev->irq)
- {
- case 0:
- /* COPS AutoIRQ routine */
- dev->irq = cops_irq(ioaddr, board);
- if (dev->irq)
- break;
- fallthrough; /* Once no IRQ found on this port */
- case 1:
- retval = -EINVAL;
- goto err_out;
-
- /* Fixup for users that don't know that IRQ 2 is really
- * IRQ 9, or don't know which one to set.
- */
- case 2:
- dev->irq = 9;
- break;
-
- /* Polled operation requested. Although irq of zero passed as
- * a parameter tells the init routines to probe, we'll
- * overload it to denote polled operation at runtime.
- */
- case 0xff:
- dev->irq = 0;
- break;
-
- default:
- break;
- }
-
- dev->base_addr = ioaddr;
-
- /* Reserve any actual interrupt. */
- if (dev->irq) {
- retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
- if (retval)
- goto err_out;
- }
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->lock);
-
- /* Copy local board variable to lp struct. */
- lp->board = board;
-
- dev->netdev_ops = &cops_netdev_ops;
- dev->watchdog_timeo = HZ * 2;
-
-
- /* Tell the user where the card is and what mode we're in. */
- if(board==DAYNA)
- printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n",
- dev->name, cardname, ioaddr, dev->irq);
- if(board==TANGENT) {
- if(dev->irq)
- printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n",
- dev->name, cardname, ioaddr, dev->irq);
- else
- printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
- dev->name, cardname, ioaddr);
-
- }
- return 0;
-
-err_out:
- release_region(ioaddr, COPS_IO_EXTENT);
- return retval;
-}
-
-static int __init cops_irq (int ioaddr, int board)
-{ /*
- * This does not use the IRQ to determine where the IRQ is. We just
- * assume that when we get a correct status response that it's the IRQ.
- * This really just verifies the IO port but since we only have access
- * to such a small number of IRQs (5, 4, 3) this is not bad.
- * This will probably not work for more than one card.
- */
- int irqaddr=0;
- int i, x, status;
-
- if(board==DAYNA)
- {
- outb(0, ioaddr+DAYNA_RESET);
- inb(ioaddr+DAYNA_RESET);
- mdelay(333);
- }
- if(board==TANGENT)
- {
- inb(ioaddr);
- outb(0, ioaddr);
- outb(0, ioaddr+TANG_RESET);
- }
-
- for(i=0; cops_irqlist[i] !=0; i++)
- {
- irqaddr = cops_irqlist[i];
- for(x = 0xFFFF; x>0; x --) /* wait for response */
- {
- if(board==DAYNA)
- {
- status = (inb(ioaddr+DAYNA_CARD_STATUS)&3);
- if(status == 1)
- return irqaddr;
- }
- if(board==TANGENT)
- {
- if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0)
- return irqaddr;
- }
- }
- }
- return 0; /* no IRQ found */
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- */
-static int cops_open(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- if(dev->irq==0)
- {
- /*
- * I don't know if the Dayna-style boards support polled
- * operation. For now, only allow it for Tangent.
- */
- if(lp->board==TANGENT) /* Poll 20 times per second */
- {
- cops_timer_dev = dev;
- timer_setup(&cops_timer, cops_poll, 0);
- cops_timer.expires = jiffies + HZ/20;
- add_timer(&cops_timer);
- }
- else
- {
- printk(KERN_WARNING "%s: No irq line set\n", dev->name);
- return -EAGAIN;
- }
- }
-
- cops_jumpstart(dev); /* Start the card up. */
-
- netif_start_queue(dev);
- return 0;
-}
-
-/*
- * This allows for a dynamic start/restart of the entire card.
- */
-static int cops_jumpstart(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- /*
- * Once the card has the firmware loaded and has acquired
- * the nodeid, if it is reset it will lose it all.
- */
- cops_reset(dev,1); /* Need to reset card before load firmware. */
- cops_load(dev); /* Load the firmware. */
-
- /*
- * If atalkd already gave us a nodeid we will use that
- * one again, else we wait for atalkd to give us a nodeid
- * in cops_ioctl. This may cause a problem if someone steals
- * our nodeid while we are resetting.
- */
- if(lp->nodeid == 1)
- cops_nodeid(dev,lp->node_acquire);
-
- return 0;
-}
-
-static void tangent_wait_reset(int ioaddr)
-{
- int timeout=0;
-
- while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- mdelay(1); /* Wait 1 millisecond */
-}
-
-/*
- * Reset the LocalTalk board.
- */
-static void cops_reset(struct net_device *dev, int sleep)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr=dev->base_addr;
-
- if(lp->board==TANGENT)
- {
- inb(ioaddr); /* Clear request latch. */
- outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */
- outb(0, ioaddr+TANG_RESET); /* Reset the adapter. */
-
- tangent_wait_reset(ioaddr);
- outb(0, ioaddr+TANG_CLEAR_INT);
- }
- if(lp->board==DAYNA)
- {
- outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
- inb(ioaddr+DAYNA_RESET); /* Clear the reset */
- if (sleep)
- msleep(333);
- else
- mdelay(333);
- }
-
- netif_wake_queue(dev);
-}
-
-static void cops_load (struct net_device *dev)
-{
- struct ifreq ifr;
- struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru;
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr=dev->base_addr;
- int length, i = 0;
-
- strcpy(ifr.ifr_name,"lt0");
-
- /* Get card's firmware code and do some checks on it. */
-#ifdef CONFIG_COPS_DAYNA
- if(lp->board==DAYNA)
- {
- ltf->length=sizeof(ffdrv_code);
- ltf->data=ffdrv_code;
- }
- else
-#endif
-#ifdef CONFIG_COPS_TANGENT
- if(lp->board==TANGENT)
- {
- ltf->length=sizeof(ltdrv_code);
- ltf->data=ltdrv_code;
- }
- else
-#endif
- {
- printk(KERN_INFO "%s; unsupported board type.\n", dev->name);
- return;
- }
-
- /* Check to make sure firmware is correct length. */
- if(lp->board==DAYNA && ltf->length!=5983)
- {
- printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name);
- return;
- }
- if(lp->board==TANGENT && ltf->length!=2501)
- {
- printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name);
- return;
- }
-
- if(lp->board==DAYNA)
- {
- /*
- * We must wait for a status response
- * with the DAYNA board.
- */
- while(++i<65536)
- {
- if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1)
- break;
- }
-
- if(i==65536)
- return;
- }
-
- /*
- * Upload the firmware and kick. Byte-by-byte works nicely here.
- */
- i=0;
- length = ltf->length;
- while(length--)
- {
- outb(ltf->data[i], ioaddr);
- i++;
- }
-
- if(cops_debug > 1)
- printk("%s: Uploaded firmware - %d bytes of %d bytes.\n",
- dev->name, i, ltf->length);
-
- if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */
- outb(1, ioaddr+DAYNA_INT_CARD);
- else /* Tell Tang to run the firmware code. */
- inb(ioaddr);
-
- if(lp->board==TANGENT)
- {
- tangent_wait_reset(ioaddr);
- inb(ioaddr); /* Clear initial ready signal. */
- }
-}
-
-/*
- * Get the LocalTalk Nodeid from the card. We can suggest
- * any nodeid 1-254. The card will try and get that exact
- * address else we can specify 0 as the nodeid and the card
- * will autoprobe for a nodeid.
- */
-static int cops_nodeid (struct net_device *dev, int nodeid)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if(lp->board == DAYNA)
- {
- /* Empty any pending adapter responses. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Kick any packets waiting. */
- schedule();
- }
-
- outb(2, ioaddr); /* Output command packet length as 2. */
- outb(0, ioaddr);
- outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */
- outb(nodeid, ioaddr); /* Suggest node address. */
- }
-
- if(lp->board == TANGENT)
- {
- /* Empty any pending adapter responses. */
- while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */
- cops_rx(dev); /* Kick out packets waiting. */
- schedule();
- }
-
- /* Not sure what Tangent does if nodeid picked is used. */
- if(nodeid == 0) /* Seed. */
- nodeid = jiffies&0xFF; /* Get a random try */
- outb(2, ioaddr); /* Command length LSB */
- outb(0, ioaddr); /* Command length MSB */
- outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */
- outb(nodeid, ioaddr); /* LAP address hint. */
- outb(0xFF, ioaddr); /* Int. level to use */
- }
-
- lp->node_acquire=0; /* Set nodeid holder to 0. */
- while(lp->node_acquire==0) /* Get *True* nodeid finally. */
- {
- outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */
-
- if(lp->board == DAYNA)
- {
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
- }
- if(lp->board == TANGENT)
- {
- if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY)
- cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */
- }
- schedule();
- }
-
- if(cops_debug > 1)
- printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n",
- dev->name, lp->node_acquire);
-
- lp->nodeid=1; /* Set got nodeid to 1. */
-
- return 0;
-}
-
-/*
- * Poll the Tangent type cards to see if we have work.
- */
-
-static void cops_poll(struct timer_list *unused)
-{
- int ioaddr, status;
- int boguscount = 0;
- struct net_device *dev = cops_timer_dev;
-
- del_timer(&cops_timer);
-
- if(dev == NULL)
- return; /* We've been downed */
-
- ioaddr = dev->base_addr;
- do {
- status=inb(ioaddr+TANG_CARD_STATUS);
- if(status & TANG_RX_READY)
- cops_rx(dev);
- if(status & TANG_TX_READY)
- netif_wake_queue(dev);
- status = inb(ioaddr+TANG_CARD_STATUS);
- } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
-
- /* poll 20 times per second */
- cops_timer.expires = jiffies + HZ/20;
- add_timer(&cops_timer);
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t cops_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct cops_local *lp;
- int ioaddr, status;
- int boguscount = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- if(lp->board==DAYNA)
- {
- do {
- outb(0, ioaddr + COPS_CLEAR_INT);
- status=inb(ioaddr+DAYNA_CARD_STATUS);
- if((status&0x03)==DAYNA_RX_REQUEST)
- cops_rx(dev);
- netif_wake_queue(dev);
- } while(++boguscount < 20);
- }
- else
- {
- do {
- status=inb(ioaddr+TANG_CARD_STATUS);
- if(status & TANG_RX_READY)
- cops_rx(dev);
- if(status & TANG_TX_READY)
- netif_wake_queue(dev);
- status=inb(ioaddr+TANG_CARD_STATUS);
- } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY)));
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * We have a good packet(s), get it/them out of the buffers.
- */
-static void cops_rx(struct net_device *dev)
-{
- int pkt_len = 0;
- int rsp_type = 0;
- struct sk_buff *skb = NULL;
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 0;
- unsigned long flags;
-
-
- spin_lock_irqsave(&lp->lock, flags);
-
- if(lp->board==DAYNA)
- {
- outb(0, ioaddr); /* Send out Zero length. */
- outb(0, ioaddr);
- outb(DATA_READ, ioaddr); /* Send read command out. */
-
- /* Wait for DMA to turn around. */
- while(++boguscount<1000000)
- {
- barrier();
- if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY)
- break;
- }
-
- if(boguscount==1000000)
- {
- printk(KERN_WARNING "%s: DMA timed out.\n",dev->name);
- spin_unlock_irqrestore(&lp->lock, flags);
- return;
- }
- }
-
- /* Get response length. */
- pkt_len = inb(ioaddr);
- pkt_len |= (inb(ioaddr) << 8);
- /* Input IO code. */
- rsp_type=inb(ioaddr);
-
- /* Malloc up new buffer. */
- skb = dev_alloc_skb(pkt_len);
- if(skb == NULL)
- {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- while(pkt_len--) /* Discard packet */
- inb(ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
- return;
- }
- skb->dev = dev;
- skb_put(skb, pkt_len);
- skb->protocol = htons(ETH_P_LOCALTALK);
-
- insb(ioaddr, skb->data, pkt_len); /* Eat the Data */
-
- if(lp->board==DAYNA)
- outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */
-
- spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
-
- /* Check for bad response length */
- if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE)
- {
- printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
- dev->name, pkt_len);
- dev->stats.rx_errors++;
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* Set nodeid and then get out. */
- if(rsp_type == LAP_INIT_RSP)
- { /* Nodeid taken from received packet. */
- lp->node_acquire = skb->data[0];
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* One last check to make sure we have a good packet. */
- if(rsp_type != LAP_RESPONSE)
- {
- printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
- dev->stats.rx_errors++;
- dev_kfree_skb_any(skb);
- return;
- }
-
- skb_reset_mac_header(skb); /* Point to entire packet. */
- skb_pull(skb,3);
- skb_reset_transport_header(skb); /* Point to data (Skip header). */
-
- /* Update the counters. */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* Send packet to a higher place. */
- netif_rx(skb);
-}
-
-static void cops_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- dev->stats.tx_errors++;
- if(lp->board==TANGENT)
- {
- if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name);
- }
- printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
- cops_jumpstart(dev); /* Restart the card. */
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-
-/*
- * Make the card transmit a LocalTalk packet.
- */
-
-static netdev_tx_t cops_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- /*
- * Block a timer-based transmit from overlapping.
- */
-
- netif_stop_queue(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
- if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0)
- cpu_relax();
- if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */
- while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
- cpu_relax();
-
- /* Output IO length. */
- outb(skb->len, ioaddr);
- outb(skb->len >> 8, ioaddr);
-
- /* Output IO code. */
- outb(LAP_WRITE, ioaddr);
-
- if(lp->board == DAYNA) /* Check the transmit buffer again. */
- while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0);
-
- outsb(ioaddr, skb->data, skb->len); /* Send out the data. */
-
- if(lp->board==DAYNA) /* Dayna requires you kick the card */
- outb(1, ioaddr+DAYNA_INT_CARD);
-
- spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
-
- /* Done sending packet, update counters and cleanup. */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * Dummy function to keep the Appletalk layer happy.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- if(cops_debug >= 3)
- printk("%s: set_multicast_list executed\n", dev->name);
-}
-
-/*
- * System ioctls for the COPS LocalTalk card.
- */
-
-static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct cops_local *lp = netdev_priv(dev);
- struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
- struct atalk_addr *aa = &lp->node_addr;
-
- switch(cmd)
- {
- case SIOCSIFADDR:
- /* Get and set the nodeid and network # atalkd wants. */
- cops_nodeid(dev, sa->sat_addr.s_node);
- aa->s_net = sa->sat_addr.s_net;
- aa->s_node = lp->node_acquire;
-
- /* Set broadcast address. */
- dev->broadcast[0] = 0xFF;
-
- /* Set hardware address. */
- dev->addr_len = 1;
- dev_addr_set(dev, &aa->s_node);
- return 0;
-
- case SIOCGIFADDR:
- sa->sat_addr.s_net = aa->s_net;
- sa->sat_addr.s_node = aa->s_node;
- return 0;
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/*
- * The inverse routine to cops_open().
- */
-
-static int cops_close(struct net_device *dev)
-{
- struct cops_local *lp = netdev_priv(dev);
-
- /* If we were running polled, yank the timer.
- */
- if(lp->board==TANGENT && dev->irq==0)
- del_timer(&cops_timer);
-
- netif_stop_queue(dev);
- return 0;
-}
-
-
-#ifdef MODULE
-static struct net_device *cops_dev;
-
-MODULE_LICENSE("GPL");
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(board_type, int, other, 0);
-
-static int __init cops_module_init(void)
-{
- if (io == 0)
- printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
- cardname);
- cops_dev = cops_probe(-1);
- return PTR_ERR_OR_ZERO(cops_dev);
-}
-
-static void __exit cops_module_exit(void)
-{
- unregister_netdev(cops_dev);
- cleanup_card(cops_dev);
- free_netdev(cops_dev);
-}
-module_init(cops_module_init);
-module_exit(cops_module_exit);
-#endif /* MODULE */
diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h
deleted file mode 100644
index 7a0bfb351929..000000000000
--- a/drivers/net/appletalk/cops.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* cops.h: LocalTalk driver for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-#ifndef __LINUX_COPSLTALK_H
-#define __LINUX_COPSLTALK_H
-
-#ifdef __KERNEL__
-
-/* Max LLAP size we will accept. */
-#define MAX_LLAP_SIZE 603
-
-/* Tangent */
-#define TANG_CARD_STATUS 1
-#define TANG_CLEAR_INT 1
-#define TANG_RESET 3
-
-#define TANG_TX_READY 1
-#define TANG_RX_READY 2
-
-/* Dayna */
-#define DAYNA_CMD_DATA 0
-#define DAYNA_CLEAR_INT 1
-#define DAYNA_CARD_STATUS 2
-#define DAYNA_INT_CARD 3
-#define DAYNA_RESET 4
-
-#define DAYNA_RX_READY 0
-#define DAYNA_TX_READY 1
-#define DAYNA_RX_REQUEST 3
-
-/* Same on both card types */
-#define COPS_CLEAR_INT 1
-
-/* LAP response codes received from the cards. */
-#define LAP_INIT 1 /* Init cmd */
-#define LAP_INIT_RSP 2 /* Init response */
-#define LAP_WRITE 3 /* Write cmd */
-#define DATA_READ 4 /* Data read */
-#define LAP_RESPONSE 4 /* Received ALAP frame response */
-#define LAP_GETSTAT 5 /* Get LAP and HW status */
-#define LAP_RSPSTAT 6 /* Status response */
-
-#endif
-
-/*
- * Structure to hold the firmware information.
- */
-struct ltfirmware
-{
- unsigned int length;
- const unsigned char *data;
-};
-
-#define DAYNA 1
-#define TANGENT 2
-
-#endif
diff --git a/drivers/net/appletalk/cops_ffdrv.h b/drivers/net/appletalk/cops_ffdrv.h
deleted file mode 100644
index b02005087c1b..000000000000
--- a/drivers/net/appletalk/cops_ffdrv.h
+++ /dev/null
@@ -1,532 +0,0 @@
-
-/*
- * The firmware this driver downloads into the Localtalk card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * It is taken from the COPS SDK and is under the following license
- *
- * This material is licensed to you strictly for use in conjunction with
- * the use of COPS LocalTalk adapters.
- * There is no charge for this SDK. And no waranty express or implied
- * about its fitness for any purpose. However, we will cheerefully
- * refund every penny you paid for this SDK...
- * Regards,
- *
- * Thomas F. Divine
- * Chief Scientist
- */
-
-
-/* cops_ffdrv.h: LocalTalk driver firmware dump for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-
-#ifdef CONFIG_COPS_DAYNA
-
-static const unsigned char ffdrv_code[] = {
- 58,3,0,50,228,149,33,255,255,34,226,149,
- 249,17,40,152,33,202,154,183,237,82,77,68,
- 11,107,98,19,54,0,237,176,175,50,80,0,
- 62,128,237,71,62,32,237,57,51,62,12,237,
- 57,50,237,57,54,62,6,237,57,52,62,12,
- 237,57,49,33,107,137,34,32,128,33,83,130,
- 34,40,128,33,86,130,34,42,128,33,112,130,
- 34,36,128,33,211,130,34,38,128,62,0,237,
- 57,16,33,63,148,34,34,128,237,94,205,15,
- 130,251,205,168,145,24,141,67,111,112,121,114,
- 105,103,104,116,32,40,67,41,32,49,57,56,
- 56,32,45,32,68,97,121,110,97,32,67,111,
- 109,109,117,110,105,99,97,116,105,111,110,115,
- 32,32,32,65,108,108,32,114,105,103,104,116,
- 115,32,114,101,115,101,114,118,101,100,46,32,
- 32,40,68,40,68,7,16,8,34,7,22,6,
- 16,5,12,4,8,3,6,140,0,16,39,128,
- 0,4,96,10,224,6,0,7,126,2,64,11,
- 118,12,6,13,0,14,193,15,0,5,96,3,
- 192,1,64,9,8,62,9,211,66,62,192,211,
- 66,62,100,61,32,253,6,28,33,205,129,14,
- 66,237,163,194,253,129,6,28,33,205,129,14,
- 64,237,163,194,9,130,201,62,47,50,71,152,
- 62,47,211,68,58,203,129,237,57,20,58,204,
- 129,237,57,21,33,77,152,54,132,205,233,129,
- 58,228,149,254,209,40,6,56,4,62,0,24,
- 2,219,96,33,233,149,119,230,62,33,232,149,
- 119,213,33,8,152,17,7,0,25,119,19,25,
- 119,209,201,251,237,77,245,197,213,229,221,229,
- 205,233,129,62,1,50,106,137,205,158,139,221,
- 225,225,209,193,241,251,237,77,245,197,213,219,
- 72,237,56,16,230,46,237,57,16,237,56,12,
- 58,72,152,183,32,26,6,20,17,128,2,237,
- 56,46,187,32,35,237,56,47,186,32,29,219,
- 72,230,1,32,3,5,32,232,175,50,72,152,
- 229,221,229,62,1,50,106,137,205,158,139,221,
- 225,225,24,25,62,1,50,72,152,58,201,129,
- 237,57,12,58,202,129,237,57,13,237,56,16,
- 246,17,237,57,16,209,193,241,251,237,77,245,
- 197,229,213,221,229,237,56,16,230,17,237,57,
- 16,237,56,20,58,34,152,246,16,246,8,211,
- 68,62,6,61,32,253,58,34,152,246,8,211,
- 68,58,203,129,237,57,20,58,204,129,237,57,
- 21,237,56,16,246,34,237,57,16,221,225,209,
- 225,193,241,251,237,77,33,2,0,57,126,230,
- 3,237,100,1,40,2,246,128,230,130,245,62,
- 5,211,64,241,211,64,201,229,213,243,237,56,
- 16,230,46,237,57,16,237,56,12,251,70,35,
- 35,126,254,175,202,77,133,254,129,202,15,133,
- 230,128,194,191,132,43,58,44,152,119,33,76,
- 152,119,35,62,132,119,120,254,255,40,4,58,
- 49,152,119,219,72,43,43,112,17,3,0,237,
- 56,52,230,248,237,57,52,219,72,230,1,194,
- 141,131,209,225,237,56,52,246,6,237,57,52,
- 62,1,55,251,201,62,3,211,66,62,192,211,
- 66,62,48,211,66,0,0,219,66,230,1,40,
- 4,219,67,24,240,205,203,135,58,75,152,254,
- 255,202,128,132,58,49,152,254,161,250,207,131,
- 58,34,152,211,68,62,10,211,66,62,128,211,
- 66,62,11,211,66,62,6,211,66,24,0,62,
- 14,211,66,62,33,211,66,62,1,211,66,62,
- 64,211,66,62,3,211,66,62,209,211,66,62,
- 100,71,219,66,230,1,32,6,5,32,247,195,
- 248,132,219,67,71,58,44,152,184,194,248,132,
- 62,100,71,219,66,230,1,32,6,5,32,247,
- 195,248,132,219,67,62,100,71,219,66,230,1,
- 32,6,5,32,247,195,248,132,219,67,254,133,
- 32,7,62,0,50,74,152,24,17,254,173,32,
- 7,62,1,50,74,152,24,6,254,141,194,248,
- 132,71,209,225,58,49,152,254,132,32,10,62,
- 50,205,2,134,205,144,135,24,27,254,140,32,
- 15,62,110,205,2,134,62,141,184,32,5,205,
- 144,135,24,8,62,10,205,2,134,205,8,134,
- 62,1,50,106,137,205,158,139,237,56,52,246,
- 6,237,57,52,175,183,251,201,62,20,135,237,
- 57,20,175,237,57,21,237,56,16,246,2,237,
- 57,16,237,56,20,95,237,56,21,123,254,10,
- 48,244,237,56,16,230,17,237,57,16,209,225,
- 205,144,135,62,1,50,106,137,205,158,139,237,
- 56,52,246,6,237,57,52,175,183,251,201,209,
- 225,243,219,72,230,1,40,13,62,10,211,66,
- 0,0,219,66,230,192,202,226,132,237,56,52,
- 246,6,237,57,52,62,1,55,251,201,205,203,
- 135,62,1,50,106,137,205,158,139,237,56,52,
- 246,6,237,57,52,183,251,201,209,225,62,1,
- 50,106,137,205,158,139,237,56,52,246,6,237,
- 57,52,62,2,55,251,201,209,225,243,219,72,
- 230,1,202,213,132,62,10,211,66,0,0,219,
- 66,230,192,194,213,132,229,62,1,50,106,137,
- 42,40,152,205,65,143,225,17,3,0,205,111,
- 136,62,6,211,66,58,44,152,211,66,237,56,
- 52,246,6,237,57,52,183,251,201,209,197,237,
- 56,52,230,248,237,57,52,219,72,230,1,32,
- 15,193,225,237,56,52,246,6,237,57,52,62,
- 1,55,251,201,14,23,58,37,152,254,0,40,
- 14,14,2,254,1,32,5,62,140,119,24,3,
- 62,132,119,43,43,197,205,203,135,193,62,1,
- 211,66,62,64,211,66,62,3,211,66,62,193,
- 211,66,62,100,203,39,71,219,66,230,1,32,
- 6,5,32,247,195,229,133,33,238,151,219,67,
- 71,58,44,152,184,194,229,133,119,62,100,71,
- 219,66,230,1,32,6,5,32,247,195,229,133,
- 219,67,35,119,13,32,234,193,225,62,1,50,
- 106,137,205,158,139,237,56,52,246,6,237,57,
- 52,175,183,251,201,33,234,151,35,35,62,255,
- 119,193,225,62,1,50,106,137,205,158,139,237,
- 56,52,246,6,237,57,52,175,251,201,243,61,
- 32,253,251,201,62,3,211,66,62,192,211,66,
- 58,49,152,254,140,32,19,197,229,213,17,181,
- 129,33,185,129,1,2,0,237,176,209,225,193,
- 24,27,229,213,33,187,129,58,49,152,230,15,
- 87,30,2,237,92,25,17,181,129,126,18,19,
- 35,126,18,209,225,58,34,152,246,8,211,68,
- 58,49,152,254,165,40,14,254,164,40,10,62,
- 10,211,66,62,224,211,66,24,25,58,74,152,
- 254,0,40,10,62,10,211,66,62,160,211,66,
- 24,8,62,10,211,66,62,128,211,66,62,11,
- 211,66,62,6,211,66,205,147,143,62,5,211,
- 66,62,224,211,66,62,5,211,66,62,96,211,
- 66,62,5,61,32,253,62,5,211,66,62,224,
- 211,66,62,14,61,32,253,62,5,211,66,62,
- 233,211,66,62,128,211,66,58,181,129,61,32,
- 253,62,1,211,66,62,192,211,66,1,254,19,
- 237,56,46,187,32,6,13,32,247,195,226,134,
- 62,192,211,66,0,0,219,66,203,119,40,250,
- 219,66,203,87,40,250,243,237,56,16,230,17,
- 237,57,16,237,56,20,251,62,5,211,66,62,
- 224,211,66,58,182,129,61,32,253,229,33,181,
- 129,58,183,129,203,63,119,35,58,184,129,119,
- 225,62,10,211,66,62,224,211,66,62,11,211,
- 66,62,118,211,66,62,47,211,68,62,5,211,
- 66,62,233,211,66,58,181,129,61,32,253,62,
- 5,211,66,62,224,211,66,58,182,129,61,32,
- 253,62,5,211,66,62,96,211,66,201,229,213,
- 58,50,152,230,15,87,30,2,237,92,33,187,
- 129,25,17,181,129,126,18,35,19,126,18,209,
- 225,58,71,152,246,8,211,68,58,50,152,254,
- 165,40,14,254,164,40,10,62,10,211,66,62,
- 224,211,66,24,8,62,10,211,66,62,128,211,
- 66,62,11,211,66,62,6,211,66,195,248,135,
- 62,3,211,66,62,192,211,66,197,229,213,17,
- 181,129,33,183,129,1,2,0,237,176,209,225,
- 193,62,47,211,68,62,10,211,66,62,224,211,
- 66,62,11,211,66,62,118,211,66,62,1,211,
- 66,62,0,211,66,205,147,143,195,16,136,62,
- 3,211,66,62,192,211,66,197,229,213,17,181,
- 129,33,183,129,1,2,0,237,176,209,225,193,
- 62,47,211,68,62,10,211,66,62,224,211,66,
- 62,11,211,66,62,118,211,66,205,147,143,62,
- 5,211,66,62,224,211,66,62,5,211,66,62,
- 96,211,66,62,5,61,32,253,62,5,211,66,
- 62,224,211,66,62,14,61,32,253,62,5,211,
- 66,62,233,211,66,62,128,211,66,58,181,129,
- 61,32,253,62,1,211,66,62,192,211,66,1,
- 254,19,237,56,46,187,32,6,13,32,247,195,
- 88,136,62,192,211,66,0,0,219,66,203,119,
- 40,250,219,66,203,87,40,250,62,5,211,66,
- 62,224,211,66,58,182,129,61,32,253,62,5,
- 211,66,62,96,211,66,201,197,14,67,6,0,
- 62,3,211,66,62,192,211,66,62,48,211,66,
- 0,0,219,66,230,1,40,4,219,67,24,240,
- 62,5,211,66,62,233,211,66,62,128,211,66,
- 58,181,129,61,32,253,237,163,29,62,192,211,
- 66,219,66,230,4,40,250,237,163,29,32,245,
- 219,66,230,4,40,250,62,255,71,219,66,230,
- 4,40,3,5,32,247,219,66,230,4,40,250,
- 62,5,211,66,62,224,211,66,58,182,129,61,
- 32,253,62,5,211,66,62,96,211,66,58,71,
- 152,254,1,202,18,137,62,16,211,66,62,56,
- 211,66,62,14,211,66,62,33,211,66,62,1,
- 211,66,62,248,211,66,237,56,48,246,153,230,
- 207,237,57,48,62,3,211,66,62,221,211,66,
- 193,201,58,71,152,211,68,62,10,211,66,62,
- 128,211,66,62,11,211,66,62,6,211,66,62,
- 6,211,66,58,44,152,211,66,62,16,211,66,
- 62,56,211,66,62,48,211,66,0,0,62,14,
- 211,66,62,33,211,66,62,1,211,66,62,248,
- 211,66,237,56,48,246,145,246,8,230,207,237,
- 57,48,62,3,211,66,62,221,211,66,193,201,
- 44,3,1,0,70,69,1,245,197,213,229,175,
- 50,72,152,237,56,16,230,46,237,57,16,237,
- 56,12,62,1,211,66,0,0,219,66,95,230,
- 160,32,3,195,20,139,123,230,96,194,72,139,
- 62,48,211,66,62,1,211,66,62,64,211,66,
- 237,91,40,152,205,207,143,25,43,55,237,82,
- 218,70,139,34,42,152,98,107,58,44,152,190,
- 194,210,138,35,35,62,130,190,194,200,137,62,
- 1,50,48,152,62,175,190,202,82,139,62,132,
- 190,32,44,50,50,152,62,47,50,71,152,229,
- 175,50,106,137,42,40,152,205,65,143,225,54,
- 133,43,70,58,44,152,119,43,112,17,3,0,
- 62,10,205,2,134,205,111,136,195,158,138,62,
- 140,190,32,19,50,50,152,58,233,149,230,4,
- 202,222,138,62,1,50,71,152,195,219,137,126,
- 254,160,250,185,138,254,166,242,185,138,50,50,
- 152,43,126,35,229,213,33,234,149,95,22,0,
- 25,126,254,132,40,18,254,140,40,14,58,50,
- 152,230,15,87,126,31,21,242,65,138,56,2,
- 175,119,58,50,152,230,15,87,58,233,149,230,
- 62,31,21,242,85,138,218,98,138,209,225,195,
- 20,139,58,50,152,33,100,137,230,15,95,22,
- 0,25,126,50,71,152,209,225,58,50,152,254,
- 164,250,135,138,58,73,152,254,0,40,4,54,
- 173,24,2,54,133,43,70,58,44,152,119,43,
- 112,17,3,0,205,70,135,175,50,106,137,205,
- 208,139,58,199,129,237,57,12,58,200,129,237,
- 57,13,237,56,16,246,17,237,57,16,225,209,
- 193,241,251,237,77,62,129,190,194,227,138,54,
- 130,43,70,58,44,152,119,43,112,17,3,0,
- 205,144,135,195,20,139,35,35,126,254,132,194,
- 227,138,175,50,106,137,205,158,139,24,42,58,
- 201,154,254,1,40,7,62,1,50,106,137,24,
- 237,58,106,137,254,1,202,222,138,62,128,166,
- 194,222,138,221,229,221,33,67,152,205,127,142,
- 205,109,144,221,225,225,209,193,241,251,237,77,
- 58,106,137,254,1,202,44,139,58,50,152,254,
- 164,250,44,139,58,73,152,238,1,50,73,152,
- 221,229,221,33,51,152,205,127,142,221,225,62,
- 1,50,106,137,205,158,139,195,13,139,24,208,
- 24,206,24,204,230,64,40,3,195,20,139,195,
- 20,139,43,126,33,8,152,119,35,58,44,152,
- 119,43,237,91,35,152,205,203,135,205,158,139,
- 195,13,139,175,50,78,152,62,3,211,66,62,
- 192,211,66,201,197,33,4,0,57,126,35,102,
- 111,62,1,50,106,137,219,72,205,141,139,193,
- 201,62,1,50,78,152,34,40,152,54,0,35,
- 35,54,0,195,163,139,58,78,152,183,200,229,
- 33,181,129,58,183,129,119,35,58,184,129,119,
- 225,62,47,211,68,62,14,211,66,62,193,211,
- 66,62,10,211,66,62,224,211,66,62,11,211,
- 66,62,118,211,66,195,3,140,58,78,152,183,
- 200,58,71,152,211,68,254,69,40,4,254,70,
- 32,17,58,73,152,254,0,40,10,62,10,211,
- 66,62,160,211,66,24,8,62,10,211,66,62,
- 128,211,66,62,11,211,66,62,6,211,66,62,
- 6,211,66,58,44,152,211,66,62,16,211,66,
- 62,56,211,66,62,48,211,66,0,0,219,66,
- 230,1,40,4,219,67,24,240,62,14,211,66,
- 62,33,211,66,42,40,152,205,65,143,62,1,
- 211,66,62,248,211,66,237,56,48,246,145,246,
- 8,230,207,237,57,48,62,3,211,66,62,221,
- 211,66,201,62,16,211,66,62,56,211,66,62,
- 48,211,66,0,0,219,66,230,1,40,4,219,
- 67,24,240,62,14,211,66,62,33,211,66,62,
- 1,211,66,62,248,211,66,237,56,48,246,153,
- 230,207,237,57,48,62,3,211,66,62,221,211,
- 66,201,229,213,33,234,149,95,22,0,25,126,
- 254,132,40,4,254,140,32,2,175,119,123,209,
- 225,201,6,8,14,0,31,48,1,12,16,250,
- 121,201,33,4,0,57,94,35,86,33,2,0,
- 57,126,35,102,111,221,229,34,89,152,237,83,
- 91,152,221,33,63,152,205,127,142,58,81,152,
- 50,82,152,58,80,152,135,50,80,152,205,162,
- 140,254,3,56,16,58,81,152,135,60,230,15,
- 50,81,152,175,50,80,152,24,23,58,79,152,
- 205,162,140,254,3,48,13,58,81,152,203,63,
- 50,81,152,62,255,50,79,152,58,81,152,50,
- 82,152,58,79,152,135,50,79,152,62,32,50,
- 83,152,50,84,152,237,56,16,230,17,237,57,
- 16,219,72,62,192,50,93,152,62,93,50,94,
- 152,58,93,152,61,50,93,152,32,9,58,94,
- 152,61,50,94,152,40,44,62,170,237,57,20,
- 175,237,57,21,237,56,16,246,2,237,57,16,
- 219,72,230,1,202,29,141,237,56,20,71,237,
- 56,21,120,254,10,48,237,237,56,16,230,17,
- 237,57,16,243,62,14,211,66,62,65,211,66,
- 251,58,39,152,23,23,60,50,39,152,71,58,
- 82,152,160,230,15,40,22,71,14,10,219,66,
- 230,16,202,186,141,219,72,230,1,202,186,141,
- 13,32,239,16,235,42,89,152,237,91,91,152,
- 205,47,131,48,7,61,202,186,141,195,227,141,
- 221,225,33,0,0,201,221,33,55,152,205,127,
- 142,58,84,152,61,50,84,152,40,19,58,82,
- 152,246,1,50,82,152,58,79,152,246,1,50,
- 79,152,195,29,141,221,225,33,1,0,201,221,
- 33,59,152,205,127,142,58,80,152,246,1,50,
- 80,152,58,82,152,135,246,1,50,82,152,58,
- 83,152,61,50,83,152,194,29,141,221,225,33,
- 2,0,201,221,229,33,0,0,57,17,4,0,
- 25,126,50,44,152,230,128,50,85,152,58,85,
- 152,183,40,6,221,33,88,2,24,4,221,33,
- 150,0,58,44,152,183,40,53,60,40,50,60,
- 40,47,61,61,33,86,152,119,35,119,35,54,
- 129,175,50,48,152,221,43,221,229,225,124,181,
- 40,42,33,86,152,17,3,0,205,189,140,17,
- 232,3,27,123,178,32,251,58,48,152,183,40,
- 224,58,44,152,71,62,7,128,230,127,71,58,
- 85,152,176,50,44,152,24,162,221,225,201,183,
- 221,52,0,192,221,52,1,192,221,52,2,192,
- 221,52,3,192,55,201,245,62,1,211,100,241,
- 201,245,62,1,211,96,241,201,33,2,0,57,
- 126,35,102,111,237,56,48,230,175,237,57,48,
- 62,48,237,57,49,125,237,57,32,124,237,57,
- 33,62,0,237,57,34,62,88,237,57,35,62,
- 0,237,57,36,237,57,37,33,128,2,125,237,
- 57,38,124,237,57,39,237,56,48,246,97,230,
- 207,237,57,48,62,0,237,57,0,62,0,211,
- 96,211,100,201,33,2,0,57,126,35,102,111,
- 237,56,48,230,175,237,57,48,62,12,237,57,
- 49,62,76,237,57,32,62,0,237,57,33,237,
- 57,34,125,237,57,35,124,237,57,36,62,0,
- 237,57,37,33,128,2,125,237,57,38,124,237,
- 57,39,237,56,48,246,97,230,207,237,57,48,
- 62,1,211,96,201,33,2,0,57,126,35,102,
- 111,229,237,56,48,230,87,237,57,48,125,237,
- 57,40,124,237,57,41,62,0,237,57,42,62,
- 67,237,57,43,62,0,237,57,44,58,106,137,
- 254,1,32,5,33,6,0,24,3,33,128,2,
- 125,237,57,46,124,237,57,47,237,56,50,230,
- 252,246,2,237,57,50,225,201,33,4,0,57,
- 94,35,86,33,2,0,57,126,35,102,111,237,
- 56,48,230,87,237,57,48,125,237,57,40,124,
- 237,57,41,62,0,237,57,42,62,67,237,57,
- 43,62,0,237,57,44,123,237,57,46,122,237,
- 57,47,237,56,50,230,244,246,0,237,57,50,
- 237,56,48,246,145,230,207,237,57,48,201,213,
- 237,56,46,95,237,56,47,87,237,56,46,111,
- 237,56,47,103,183,237,82,32,235,33,128,2,
- 183,237,82,209,201,213,237,56,38,95,237,56,
- 39,87,237,56,38,111,237,56,39,103,183,237,
- 82,32,235,33,128,2,183,237,82,209,201,245,
- 197,1,52,0,237,120,230,253,237,121,193,241,
- 201,245,197,1,52,0,237,120,246,2,237,121,
- 193,241,201,33,2,0,57,126,35,102,111,126,
- 35,110,103,201,33,0,0,34,102,152,34,96,
- 152,34,98,152,33,202,154,34,104,152,237,91,
- 104,152,42,226,149,183,237,82,17,0,255,25,
- 34,100,152,203,124,40,6,33,0,125,34,100,
- 152,42,104,152,35,35,35,229,205,120,139,193,
- 201,205,186,149,229,42,40,152,35,35,35,229,
- 205,39,144,193,124,230,3,103,221,117,254,221,
- 116,255,237,91,42,152,35,35,35,183,237,82,
- 32,12,17,5,0,42,42,152,205,171,149,242,
- 169,144,42,40,152,229,205,120,139,193,195,198,
- 149,237,91,42,152,42,98,152,25,34,98,152,
- 19,19,19,42,102,152,25,34,102,152,237,91,
- 100,152,33,158,253,25,237,91,102,152,205,171,
- 149,242,214,144,33,0,0,34,102,152,62,1,
- 50,95,152,205,225,144,195,198,149,58,95,152,
- 183,200,237,91,96,152,42,102,152,205,171,149,
- 242,5,145,237,91,102,152,33,98,2,25,237,
- 91,96,152,205,171,149,250,37,145,237,91,96,
- 152,42,102,152,183,237,82,32,7,42,98,152,
- 125,180,40,13,237,91,102,152,42,96,152,205,
- 171,149,242,58,145,237,91,104,152,42,102,152,
- 25,35,35,35,229,205,120,139,193,175,50,95,
- 152,201,195,107,139,205,206,149,250,255,243,205,
- 225,144,251,58,230,149,183,194,198,149,17,1,
- 0,42,98,152,205,171,149,250,198,149,62,1,
- 50,230,149,237,91,96,152,42,104,152,25,221,
- 117,252,221,116,253,237,91,104,152,42,96,152,
- 25,35,35,35,221,117,254,221,116,255,35,35,
- 35,229,205,39,144,124,230,3,103,35,35,35,
- 221,117,250,221,116,251,235,221,110,252,221,102,
- 253,115,35,114,35,54,4,62,1,211,100,211,
- 84,195,198,149,33,0,0,34,102,152,34,96,
- 152,34,98,152,33,202,154,34,104,152,237,91,
- 104,152,42,226,149,183,237,82,17,0,255,25,
- 34,100,152,33,109,152,54,0,33,107,152,229,
- 205,240,142,193,62,47,50,34,152,62,132,50,
- 49,152,205,241,145,205,61,145,58,39,152,60,
- 50,39,152,24,241,205,206,149,251,255,33,109,
- 152,126,183,202,198,149,110,221,117,251,33,109,
- 152,54,0,221,126,251,254,1,40,28,254,3,
- 40,101,254,4,202,190,147,254,5,202,147,147,
- 254,8,40,87,33,107,152,229,205,240,142,195,
- 198,149,58,201,154,183,32,21,33,111,152,126,
- 50,229,149,205,52,144,33,110,152,110,38,0,
- 229,205,11,142,193,237,91,96,152,42,104,152,
- 25,221,117,254,221,116,255,35,35,54,2,17,
- 2,0,43,43,115,35,114,58,44,152,35,35,
- 119,58,228,149,35,119,62,1,211,100,211,84,
- 62,1,50,201,154,24,169,205,153,142,58,231,
- 149,183,40,250,175,50,231,149,33,110,152,126,
- 254,255,40,91,58,233,149,230,63,183,40,83,
- 94,22,0,33,234,149,25,126,183,40,13,33,
- 110,152,94,33,234,150,25,126,254,3,32,36,
- 205,81,148,125,180,33,110,152,94,22,0,40,
- 17,33,234,149,25,54,0,33,107,152,229,205,
- 240,142,193,195,198,149,33,234,150,25,54,0,
- 33,110,152,94,22,0,33,234,149,25,126,50,
- 49,152,254,132,32,37,62,47,50,34,152,42,
- 107,152,229,33,110,152,229,205,174,140,193,193,
- 125,180,33,110,152,94,22,0,33,234,150,202,
- 117,147,25,52,195,120,147,58,49,152,254,140,
- 32,7,62,1,50,34,152,24,210,62,32,50,
- 106,152,24,19,58,49,152,95,58,106,152,163,
- 183,58,106,152,32,11,203,63,50,106,152,58,
- 106,152,183,32,231,254,2,40,51,254,4,40,
- 38,254,8,40,26,254,16,40,13,254,32,32,
- 158,62,165,50,49,152,62,69,24,190,62,164,
- 50,49,152,62,70,24,181,62,163,50,49,152,
- 175,24,173,62,162,50,49,152,62,1,24,164,
- 62,161,50,49,152,62,3,24,155,25,54,0,
- 221,126,251,254,8,40,7,58,230,149,183,202,
- 32,146,33,107,152,229,205,240,142,193,211,84,
- 195,198,149,237,91,96,152,42,104,152,25,221,
- 117,254,221,116,255,35,35,54,6,17,2,0,
- 43,43,115,35,114,58,228,149,35,35,119,58,
- 233,149,35,119,205,146,142,195,32,146,237,91,
- 96,152,42,104,152,25,229,205,160,142,193,58,
- 231,149,183,40,250,175,50,231,149,243,237,91,
- 96,152,42,104,152,25,221,117,254,221,116,255,
- 78,35,70,221,113,252,221,112,253,89,80,42,
- 98,152,183,237,82,34,98,152,203,124,40,19,
- 33,0,0,34,98,152,34,102,152,34,96,152,
- 62,1,50,95,152,24,40,221,94,252,221,86,
- 253,19,19,19,42,96,152,25,34,96,152,237,
- 91,100,152,33,158,253,25,237,91,96,152,205,
- 171,149,242,55,148,33,0,0,34,96,152,175,
- 50,230,149,251,195,32,146,245,62,1,50,231,
- 149,62,16,237,57,0,211,80,241,251,237,77,
- 201,205,186,149,229,229,33,0,0,34,37,152,
- 33,110,152,126,50,234,151,58,44,152,33,235,
- 151,119,221,54,253,0,221,54,254,0,195,230,
- 148,33,236,151,54,175,33,3,0,229,33,234,
- 151,229,205,174,140,193,193,33,236,151,126,254,
- 255,40,74,33,245,151,110,221,117,255,33,249,
- 151,126,221,166,255,221,119,255,33,253,151,126,
- 221,166,255,221,119,255,58,232,149,95,221,126,
- 255,163,221,119,255,183,40,15,230,191,33,110,
- 152,94,22,0,33,234,149,25,119,24,12,33,
- 110,152,94,22,0,33,234,149,25,54,132,33,
- 0,0,195,198,149,221,110,253,221,102,254,35,
- 221,117,253,221,116,254,17,32,0,221,110,253,
- 221,102,254,205,171,149,250,117,148,58,233,149,
- 203,87,40,84,33,1,0,34,37,152,221,54,
- 253,0,221,54,254,0,24,53,33,236,151,54,
- 175,33,3,0,229,33,234,151,229,205,174,140,
- 193,193,33,236,151,126,254,255,40,14,33,110,
- 152,94,22,0,33,234,149,25,54,140,24,159,
- 221,110,253,221,102,254,35,221,117,253,221,116,
- 254,17,32,0,221,110,253,221,102,254,205,171,
- 149,250,12,149,33,2,0,34,37,152,221,54,
- 253,0,221,54,254,0,24,54,33,236,151,54,
- 175,33,3,0,229,33,234,151,229,205,174,140,
- 193,193,33,236,151,126,254,255,40,15,33,110,
- 152,94,22,0,33,234,149,25,54,132,195,211,
- 148,221,110,253,221,102,254,35,221,117,253,221,
- 116,254,17,32,0,221,110,253,221,102,254,205,
- 171,149,250,96,149,33,1,0,195,198,149,124,
- 170,250,179,149,237,82,201,124,230,128,237,82,
- 60,201,225,253,229,221,229,221,33,0,0,221,
- 57,233,221,249,221,225,253,225,201,233,225,253,
- 229,221,229,221,33,0,0,221,57,94,35,86,
- 35,235,57,249,235,233,0,0,0,0,0,0,
- 62,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 175,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,133,1,0,0,0,63,
- 255,255,255,255,0,0,0,63,0,0,0,0,
- 0,0,0,0,0,0,0,24,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0
- } ;
-
-#endif
diff --git a/drivers/net/appletalk/cops_ltdrv.h b/drivers/net/appletalk/cops_ltdrv.h
deleted file mode 100644
index c699b1ad31da..000000000000
--- a/drivers/net/appletalk/cops_ltdrv.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * The firmware this driver downloads into the Localtalk card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * It is taken from the COPS SDK and is under the following license
- *
- * This material is licensed to you strictly for use in conjunction with
- * the use of COPS LocalTalk adapters.
- * There is no charge for this SDK. And no warranty express or implied
- * about its fitness for any purpose. However, we will cheerfully
- * refund every penny you paid for this SDK...
- * Regards,
- *
- * Thomas F. Divine
- * Chief Scientist
- */
-
-
-/* cops_ltdrv.h: LocalTalk driver firmware dump for Linux.
- *
- * Authors:
- * - Jay Schulist <jschlst@samba.org>
- */
-
-
-#ifdef CONFIG_COPS_TANGENT
-
-static const unsigned char ltdrv_code[] = {
- 58,3,0,50,148,10,33,143,15,62,85,119,
- 190,32,9,62,170,119,190,32,3,35,24,241,
- 34,146,10,249,17,150,10,33,143,15,183,237,
- 82,77,68,11,107,98,19,54,0,237,176,62,
- 16,237,57,51,62,0,237,57,50,237,57,54,
- 62,12,237,57,49,62,195,33,39,2,50,56,
- 0,34,57,0,237,86,205,30,2,251,205,60,
- 10,24,169,67,111,112,121,114,105,103,104,116,
- 32,40,99,41,32,49,57,56,56,45,49,57,
- 57,50,44,32,80,114,105,110,116,105,110,103,
- 32,67,111,109,109,117,110,105,99,97,116,105,
- 111,110,115,32,65,115,115,111,99,105,97,116,
- 101,115,44,32,73,110,99,46,65,108,108,32,
- 114,105,103,104,116,115,32,114,101,115,101,114,
- 118,101,100,46,32,32,4,4,22,40,255,60,
- 4,96,10,224,6,0,7,126,2,64,11,246,
- 12,6,13,0,14,193,15,0,5,96,3,192,
- 1,0,9,8,62,3,211,82,62,192,211,82,
- 201,62,3,211,82,62,213,211,82,201,62,5,
- 211,82,62,224,211,82,201,62,5,211,82,62,
- 224,211,82,201,62,5,211,82,62,96,211,82,
- 201,6,28,33,180,1,14,82,237,163,194,4,
- 2,33,39,2,34,64,0,58,3,0,230,1,
- 192,62,11,237,121,62,118,237,121,201,33,182,
- 10,54,132,205,253,1,201,245,197,213,229,42,
- 150,10,14,83,17,98,2,67,20,237,162,58,
- 179,1,95,219,82,230,1,32,6,29,32,247,
- 195,17,3,62,1,211,82,219,82,95,230,160,
- 32,10,237,162,32,225,21,32,222,195,15,3,
- 237,162,123,230,96,194,21,3,62,48,211,82,
- 62,1,211,82,175,211,82,237,91,150,10,43,
- 55,237,82,218,19,3,34,152,10,98,107,58,
- 154,10,190,32,81,62,1,50,158,10,35,35,
- 62,132,190,32,44,54,133,43,70,58,154,10,
- 119,43,112,17,3,0,205,137,3,62,16,211,
- 82,62,56,211,82,205,217,1,42,150,10,14,
- 83,17,98,2,67,20,58,178,1,95,195,59,
- 2,62,129,190,194,227,2,54,130,43,70,58,
- 154,10,119,43,112,17,3,0,205,137,3,195,
- 254,2,35,35,126,254,132,194,227,2,205,61,
- 3,24,20,62,128,166,194,222,2,221,229,221,
- 33,175,10,205,93,6,205,144,7,221,225,225,
- 209,193,241,251,237,77,221,229,221,33,159,10,
- 205,93,6,221,225,205,61,3,195,247,2,24,
- 237,24,235,24,233,230,64,40,2,24,227,24,
- 225,175,50,179,10,205,208,1,201,197,33,4,
- 0,57,126,35,102,111,205,51,3,193,201,62,
- 1,50,179,10,34,150,10,54,0,58,179,10,
- 183,200,62,14,211,82,62,193,211,82,62,10,
- 211,82,62,224,211,82,62,6,211,82,58,154,
- 10,211,82,62,16,211,82,62,56,211,82,62,
- 48,211,82,219,82,230,1,40,4,219,83,24,
- 242,62,14,211,82,62,33,211,82,62,1,211,
- 82,62,9,211,82,62,32,211,82,205,217,1,
- 201,14,83,205,208,1,24,23,14,83,205,208,
- 1,205,226,1,58,174,1,61,32,253,205,244,
- 1,58,174,1,61,32,253,205,226,1,58,175,
- 1,61,32,253,62,5,211,82,62,233,211,82,
- 62,128,211,82,58,176,1,61,32,253,237,163,
- 27,62,192,211,82,219,82,230,4,40,250,237,
- 163,27,122,179,32,243,219,82,230,4,40,250,
- 58,178,1,71,219,82,230,4,40,3,5,32,
- 247,219,82,230,4,40,250,205,235,1,58,177,
- 1,61,32,253,205,244,1,201,229,213,35,35,
- 126,230,128,194,145,4,43,58,154,10,119,43,
- 70,33,181,10,119,43,112,17,3,0,243,62,
- 10,211,82,219,82,230,128,202,41,4,209,225,
- 62,1,55,251,201,205,144,3,58,180,10,254,
- 255,202,127,4,205,217,1,58,178,1,71,219,
- 82,230,1,32,6,5,32,247,195,173,4,219,
- 83,71,58,154,10,184,194,173,4,58,178,1,
- 71,219,82,230,1,32,6,5,32,247,195,173,
- 4,219,83,58,178,1,71,219,82,230,1,32,
- 6,5,32,247,195,173,4,219,83,254,133,194,
- 173,4,58,179,1,24,4,58,179,1,135,61,
- 32,253,209,225,205,137,3,205,61,3,183,251,
- 201,209,225,243,62,10,211,82,219,82,230,128,
- 202,164,4,62,1,55,251,201,205,144,3,205,
- 61,3,183,251,201,209,225,62,2,55,251,201,
- 243,62,14,211,82,62,33,211,82,251,201,33,
- 4,0,57,94,35,86,33,2,0,57,126,35,
- 102,111,221,229,34,193,10,237,83,195,10,221,
- 33,171,10,205,93,6,58,185,10,50,186,10,
- 58,184,10,135,50,184,10,205,112,6,254,3,
- 56,16,58,185,10,135,60,230,15,50,185,10,
- 175,50,184,10,24,23,58,183,10,205,112,6,
- 254,3,48,13,58,185,10,203,63,50,185,10,
- 62,255,50,183,10,58,185,10,50,186,10,58,
- 183,10,135,50,183,10,62,32,50,187,10,50,
- 188,10,6,255,219,82,230,16,32,3,5,32,
- 247,205,180,4,6,40,219,82,230,16,40,3,
- 5,32,247,62,10,211,82,219,82,230,128,194,
- 46,5,219,82,230,16,40,214,237,95,71,58,
- 186,10,160,230,15,40,32,71,14,10,62,10,
- 211,82,219,82,230,128,202,119,5,205,180,4,
- 195,156,5,219,82,230,16,202,156,5,13,32,
- 229,16,225,42,193,10,237,91,195,10,205,252,
- 3,48,7,61,202,156,5,195,197,5,221,225,
- 33,0,0,201,221,33,163,10,205,93,6,58,
- 188,10,61,50,188,10,40,19,58,186,10,246,
- 1,50,186,10,58,183,10,246,1,50,183,10,
- 195,46,5,221,225,33,1,0,201,221,33,167,
- 10,205,93,6,58,184,10,246,1,50,184,10,
- 58,186,10,135,246,1,50,186,10,58,187,10,
- 61,50,187,10,194,46,5,221,225,33,2,0,
- 201,221,229,33,0,0,57,17,4,0,25,126,
- 50,154,10,230,128,50,189,10,58,189,10,183,
- 40,6,221,33,88,2,24,4,221,33,150,0,
- 58,154,10,183,40,49,60,40,46,61,33,190,
- 10,119,35,119,35,54,129,175,50,158,10,221,
- 43,221,229,225,124,181,40,42,33,190,10,17,
- 3,0,205,206,4,17,232,3,27,123,178,32,
- 251,58,158,10,183,40,224,58,154,10,71,62,
- 7,128,230,127,71,58,189,10,176,50,154,10,
- 24,166,221,225,201,183,221,52,0,192,221,52,
- 1,192,221,52,2,192,221,52,3,192,55,201,
- 6,8,14,0,31,48,1,12,16,250,121,201,
- 33,2,0,57,94,35,86,35,78,35,70,35,
- 126,35,102,105,79,120,68,103,237,176,201,33,
- 2,0,57,126,35,102,111,62,17,237,57,48,
- 125,237,57,40,124,237,57,41,62,0,237,57,
- 42,62,64,237,57,43,62,0,237,57,44,33,
- 128,2,125,237,57,46,124,237,57,47,62,145,
- 237,57,48,211,68,58,149,10,211,66,201,33,
- 2,0,57,126,35,102,111,62,33,237,57,48,
- 62,64,237,57,32,62,0,237,57,33,237,57,
- 34,125,237,57,35,124,237,57,36,62,0,237,
- 57,37,33,128,2,125,237,57,38,124,237,57,
- 39,62,97,237,57,48,211,67,58,149,10,211,
- 66,201,237,56,46,95,237,56,47,87,237,56,
- 46,111,237,56,47,103,183,237,82,32,235,33,
- 128,2,183,237,82,201,237,56,38,95,237,56,
- 39,87,237,56,38,111,237,56,39,103,183,237,
- 82,32,235,33,128,2,183,237,82,201,205,106,
- 10,221,110,6,221,102,7,126,35,110,103,195,
- 118,10,205,106,10,33,0,0,34,205,10,34,
- 198,10,34,200,10,33,143,15,34,207,10,237,
- 91,207,10,42,146,10,183,237,82,17,0,255,
- 25,34,203,10,203,124,40,6,33,0,125,34,
- 203,10,42,207,10,229,205,37,3,195,118,10,
- 205,106,10,229,42,150,10,35,35,35,229,205,
- 70,7,193,124,230,3,103,221,117,254,221,116,
- 255,237,91,152,10,35,35,35,183,237,82,32,
- 12,17,5,0,42,152,10,205,91,10,242,203,
- 7,42,150,10,229,205,37,3,195,118,10,237,
- 91,152,10,42,200,10,25,34,200,10,42,205,
- 10,25,34,205,10,237,91,203,10,33,158,253,
- 25,237,91,205,10,205,91,10,242,245,7,33,
- 0,0,34,205,10,62,1,50,197,10,205,5,
- 8,33,0,0,57,249,195,118,10,205,106,10,
- 58,197,10,183,202,118,10,237,91,198,10,42,
- 205,10,205,91,10,242,46,8,237,91,205,10,
- 33,98,2,25,237,91,198,10,205,91,10,250,
- 78,8,237,91,198,10,42,205,10,183,237,82,
- 32,7,42,200,10,125,180,40,13,237,91,205,
- 10,42,198,10,205,91,10,242,97,8,237,91,
- 207,10,42,205,10,25,229,205,37,3,175,50,
- 197,10,195,118,10,205,29,3,33,0,0,57,
- 249,195,118,10,205,106,10,58,202,10,183,40,
- 22,205,14,7,237,91,209,10,19,19,19,205,
- 91,10,242,139,8,33,1,0,195,118,10,33,
- 0,0,195,118,10,205,126,10,252,255,205,108,
- 8,125,180,194,118,10,237,91,200,10,33,0,
- 0,205,91,10,242,118,10,237,91,207,10,42,
- 198,10,25,221,117,254,221,116,255,35,35,35,
- 229,205,70,7,193,124,230,3,103,35,35,35,
- 221,117,252,221,116,253,229,221,110,254,221,102,
- 255,229,33,212,10,229,205,124,6,193,193,221,
- 110,252,221,102,253,34,209,10,33,211,10,54,
- 4,33,209,10,227,205,147,6,193,62,1,50,
- 202,10,243,221,94,252,221,86,253,42,200,10,
- 183,237,82,34,200,10,203,124,40,17,33,0,
- 0,34,200,10,34,205,10,34,198,10,50,197,
- 10,24,37,221,94,252,221,86,253,42,198,10,
- 25,34,198,10,237,91,203,10,33,158,253,25,
- 237,91,198,10,205,91,10,242,68,9,33,0,
- 0,34,198,10,205,5,8,33,0,0,57,249,
- 251,195,118,10,205,106,10,33,49,13,126,183,
- 40,16,205,42,7,237,91,47,13,19,19,19,
- 205,91,10,242,117,9,58,142,15,198,1,50,
- 142,15,195,118,10,33,49,13,126,254,1,40,
- 25,254,3,202,7,10,254,5,202,21,10,33,
- 49,13,54,0,33,47,13,229,205,207,6,195,
- 118,10,58,141,15,183,32,72,33,51,13,126,
- 50,149,10,205,86,7,33,50,13,126,230,127,
- 183,32,40,58,142,15,230,127,50,142,15,183,
- 32,5,198,1,50,142,15,33,50,13,126,111,
- 23,159,103,203,125,58,142,15,40,5,198,128,
- 50,142,15,33,50,13,119,33,50,13,126,111,
- 23,159,103,229,205,237,5,193,33,211,10,54,
- 2,33,2,0,34,209,10,58,154,10,33,212,
- 10,119,58,148,10,33,213,10,119,33,209,10,
- 229,205,147,6,193,24,128,42,47,13,229,33,
- 50,13,229,205,191,4,193,24,239,33,211,10,
- 54,6,33,3,0,34,209,10,58,154,10,33,
- 212,10,119,58,148,10,33,213,10,119,33,214,
- 10,54,5,33,209,10,229,205,147,6,24,200,
- 205,106,10,33,49,13,54,0,33,47,13,229,
- 205,207,6,33,209,10,227,205,147,6,193,205,
- 80,9,205,145,8,24,248,124,170,250,99,10,
- 237,82,201,124,230,128,237,82,60,201,225,253,
- 229,221,229,221,33,0,0,221,57,233,221,249,
- 221,225,253,225,201,233,225,253,229,221,229,221,
- 33,0,0,221,57,94,35,86,35,235,57,249,
- 235,233,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0
- } ;
-
-#endif
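
The two headers deleted above, cops_ffdrv.h and cops_ltdrv.h, are raw Z80
firmware images; cops.c picked one of the two arrays at probe time and wrote
it into the card before bringing the interface up. A minimal sketch of such a
download loop follows -- the function name, the single latch port and the
byte-at-a-time protocol are assumptions for illustration, not the exact
cops.c sequence:

    #include <linux/types.h>
    #include <asm/io.h>

    /* Sketch only: push a firmware image into the card one byte at a
     * time. The real cops.c download is interleaved with status reads,
     * and the port offset here is hypothetical. */
    static void card_load_firmware(unsigned long ioaddr,
                                   const unsigned char *fw, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    outb(fw[i], ioaddr); /* card latches one byte per write */
    }
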
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
deleted file mode 100644
index 5566daefbff4..000000000000
--- a/drivers/net/appletalk/ipddp.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * ipddp.c: IP to Appletalk-IP Encapsulation driver for Linux
- * Appletalk-IP to IP Decapsulation driver for Linux
- *
- * Authors:
- * - DDP-IP Encap by: Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- * - DDP-IP Decap by: Jay Schulist <jschlst@samba.org>
- *
- * Derived from:
- * - Almost all code already existed in net/appletalk/ddp.c I just
- * moved/reorganized it into a driver file. Original IP-over-DDP code
- * was done by Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- * - skeleton.c: A network driver outline for linux.
- * Written 1993-94 by Donald Becker.
- * - dummy.c: A dummy net driver. By Nick Holloway.
- * - MacGate: A user space Daemon for Appletalk-IP Decap for
- * Linux by Jay Schulist <jschlst@samba.org>
- *
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/atalk.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <net/route.h>
-#include <linux/uaccess.h>
-
-#include "ipddp.h" /* Our stuff */
-
-static const char version[] = KERN_INFO "ipddp.c:v0.01 8/28/97 Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n";
-
-static struct ipddp_route *ipddp_route_list;
-static DEFINE_SPINLOCK(ipddp_route_lock);
-
-#ifdef CONFIG_IPDDP_ENCAP
-static int ipddp_mode = IPDDP_ENCAP;
-#else
-static int ipddp_mode = IPDDP_DECAP;
-#endif
-
-/* Index to functions, as function prototypes. */
-static netdev_tx_t ipddp_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static int ipddp_create(struct ipddp_route *new_rt);
-static int ipddp_delete(struct ipddp_route *rt);
-static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt);
-static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-
-static const struct net_device_ops ipddp_netdev_ops = {
- .ndo_start_xmit = ipddp_xmit,
- .ndo_siocdevprivate = ipddp_siocdevprivate,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static struct net_device * __init ipddp_init(void)
-{
- static unsigned version_printed;
- struct net_device *dev;
- int err;
-
- dev = alloc_etherdev(0);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- netif_keep_dst(dev);
- strcpy(dev->name, "ipddp%d");
-
- if (version_printed++ == 0)
- printk(version);
-
- /* Initialize the device structure. */
- dev->netdev_ops = &ipddp_netdev_ops;
-
- dev->type = ARPHRD_IPDDP; /* IP over DDP tunnel */
- dev->mtu = 585;
- dev->flags |= IFF_NOARP;
-
- /*
- * The worst case header we currently need is an ethernet header
- * (14 bytes) plus a DDP header (sizeof(ddpehdr) + 1). We send over
- * SNAP, so that adds another 8 bytes (arithmetic worked out below).
- */
- dev->hard_header_len = 14+8+sizeof(struct ddpehdr)+1;
-
- err = register_netdev(dev);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- /* Let the user know what mode we are in */
- if(ipddp_mode == IPDDP_ENCAP)
- printk("%s: Appletalk-IP Encap. mode by Bradford W. Johnson <johns393@maroon.tc.umn.edu>\n",
- dev->name);
- if(ipddp_mode == IPDDP_DECAP)
- printk("%s: Appletalk-IP Decap. mode by Jay Schulist <jschlst@samba.org>\n",
- dev->name);
-
- return dev;
-}
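
Working the header comment out: struct ddpehdr is 12 bytes (four __be16 plus
four __u8 fields, assuming no padding), so hard_header_len = 14 (ethernet) +
8 (SNAP) + 12 (ddpehdr) + 1 (the trailing DDP type byte) = 35. The same
constant reappears as the magic 35 in ipddp_xmit() below, where
skb_pull(skb, 35 - (sizeof(struct ddpehdr) + 1)) strips exactly the 22 bytes
of ethernet plus SNAP framing in IPDDP_DECAP mode.
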
-
-
-/*
- * Transmit LLAP/ELAP frame using aarp_send_ddp.
- */
-static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct rtable *rtable = skb_rtable(skb);
- __be32 paddr = 0;
- struct ddpehdr *ddp;
- struct ipddp_route *rt;
- struct atalk_addr *our_addr;
-
- if (rtable->rt_gw_family == AF_INET)
- paddr = rtable->rt_gw4;
-
- spin_lock(&ipddp_route_lock);
-
- /*
- * Find appropriate route to use, based only on IP number.
- */
- for(rt = ipddp_route_list; rt != NULL; rt = rt->next)
- {
- if(rt->ip == paddr)
- break;
- }
- if(rt == NULL) {
- spin_unlock(&ipddp_route_lock);
- return NETDEV_TX_OK;
- }
-
- our_addr = atalk_find_dev_addr(rt->dev);
-
- if(ipddp_mode == IPDDP_DECAP)
- /*
- * Pull off the excess room that should not be there.
- * This is due to a hard-header problem. This is the
- * quick fix for now though, till it breaks.
- */
- skb_pull(skb, 35-(sizeof(struct ddpehdr)+1));
-
- /* Create the Extended DDP header */
- ddp = (struct ddpehdr *)skb->data;
- ddp->deh_len_hops = htons(skb->len + (1<<10)); /* length, hop count 1; see sketch below */
- ddp->deh_sum = 0;
-
- /*
- * For Localtalk we need aarp_send_ddp to strip the
- * long DDP header and place a short DDP header on it.
- */
- if(rt->dev->type == ARPHRD_LOCALTLK)
- {
- ddp->deh_dnet = 0; /* FIXME more hops?? */
- ddp->deh_snet = 0;
- }
- else
- {
- ddp->deh_dnet = rt->at.s_net; /* FIXME more hops?? */
- ddp->deh_snet = our_addr->s_net;
- }
- ddp->deh_dnode = rt->at.s_node;
- ddp->deh_snode = our_addr->s_node;
- ddp->deh_dport = 72;
- ddp->deh_sport = 72;
-
- *((__u8 *)(ddp+1)) = 22; /* ddp type = IP */
-
- skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- aarp_send_ddp(rt->dev, skb, &rt->at, NULL);
-
- spin_unlock(&ipddp_route_lock);
-
- return NETDEV_TX_OK;
-}
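
The deh_len_hops assignment above packs two fields into one big-endian word:
the low 10 bits carry the datagram length and the next 4 bits the hop count
(the layout described in <linux/atalk.h>), so adding 1 << 10 stamps the
outgoing packet with a hop count of one. A self-contained sketch of the
packing -- the helper names are invented here, not kernel API:

    #include <stdint.h>

    /* Illustrative helpers for the deh_len_hops layout used by
     * ipddp_xmit(): low 10 bits = length, next 4 bits = hop count. */
    static uint16_t ddp_pack_len_hops(uint16_t len, uint8_t hops)
    {
            return (uint16_t)((hops << 10) | (len & 0x03ff));
    }

    static uint16_t ddp_unpack_len(uint16_t len_hops)
    {
            return len_hops & 0x03ff;
    }

    static uint8_t ddp_unpack_hops(uint16_t len_hops)
    {
            return (len_hops >> 10) & 0x0f;
    }
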
-
-/*
- * Create a routing entry. We first verify that the
- * record does not already exist. If it does we return -EEXIST
- */
-static int ipddp_create(struct ipddp_route *new_rt)
-{
- struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
-
- if (rt == NULL)
- return -ENOMEM;
-
- rt->ip = new_rt->ip;
- rt->at = new_rt->at;
- rt->next = NULL;
- if ((rt->dev = atrtr_get_dev(&rt->at)) == NULL) {
- kfree(rt);
- return -ENETUNREACH;
- }
-
- spin_lock_bh(&ipddp_route_lock);
- if (__ipddp_find_route(rt)) {
- spin_unlock_bh(&ipddp_route_lock);
- kfree(rt);
- return -EEXIST;
- }
-
- rt->next = ipddp_route_list;
- ipddp_route_list = rt;
-
- spin_unlock_bh(&ipddp_route_lock);
-
- return 0;
-}
-
-/*
- * Delete a route, we only delete a FULL match.
- * If route does not exist we return -ENOENT.
- */
-static int ipddp_delete(struct ipddp_route *rt)
-{
- struct ipddp_route **r = &ipddp_route_list;
- struct ipddp_route *tmp;
-
- spin_lock_bh(&ipddp_route_lock);
- while((tmp = *r) != NULL)
- {
- if(tmp->ip == rt->ip &&
- tmp->at.s_net == rt->at.s_net &&
- tmp->at.s_node == rt->at.s_node)
- {
- *r = tmp->next;
- spin_unlock_bh(&ipddp_route_lock);
- kfree(tmp);
- return 0;
- }
- r = &tmp->next;
- }
-
- spin_unlock_bh(&ipddp_route_lock);
- return -ENOENT;
-}
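
ipddp_delete() walks the list with a pointer-to-pointer cursor, so the same
code path unlinks the head and interior nodes alike. The idiom in isolation
(a generic sketch, not driver code):

    struct node {
            struct node *next;
    };

    /* Unlink victim from a singly linked list; returns 0 on success. */
    static int unlink_node(struct node **head, struct node *victim)
    {
            struct node **pp;

            for (pp = head; *pp; pp = &(*pp)->next) {
                    if (*pp == victim) {
                            *pp = victim->next; /* same path for head and middle */
                            return 0;
                    }
            }
            return -1; /* not found */
    }
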
-
-/*
- * Find a routing entry, we only return a FULL match
- */
-static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
-{
- struct ipddp_route *f;
-
- for(f = ipddp_route_list; f != NULL; f = f->next)
- {
- if(f->ip == rt->ip &&
- f->at.s_net == rt->at.s_net &&
- f->at.s_node == rt->at.s_node)
- return f;
- }
-
- return NULL;
-}
-
-static int ipddp_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd)
-{
- struct ipddp_route rcp, rcp2, *rp;
-
- if (in_compat_syscall())
- return -EOPNOTSUPP;
-
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&rcp, data, sizeof(rcp)))
- return -EFAULT;
-
- switch(cmd)
- {
- case SIOCADDIPDDPRT:
- return ipddp_create(&rcp);
-
- case SIOCFINDIPDDPRT:
- spin_lock_bh(&ipddp_route_lock);
- rp = __ipddp_find_route(&rcp);
- if (rp) {
- memset(&rcp2, 0, sizeof(rcp2));
- rcp2.ip = rp->ip;
- rcp2.at = rp->at;
- rcp2.flags = rp->flags;
- }
- spin_unlock_bh(&ipddp_route_lock);
-
- if (rp) {
- if (copy_to_user(data, &rcp2,
- sizeof(struct ipddp_route)))
- return -EFAULT;
- return 0;
- } else
- return -ENOENT;
-
- case SIOCDELIPDDPRT:
- return ipddp_delete(&rcp);
-
- default:
- return -EINVAL;
- }
-}
-
-static struct net_device *dev_ipddp;
-
-MODULE_LICENSE("GPL");
-module_param(ipddp_mode, int, 0);
-
-static int __init ipddp_init_module(void)
-{
- dev_ipddp = ipddp_init();
- return PTR_ERR_OR_ZERO(dev_ipddp);
-}
-
-static void __exit ipddp_cleanup_module(void)
-{
- struct ipddp_route *p;
-
- unregister_netdev(dev_ipddp);
- free_netdev(dev_ipddp);
-
- while (ipddp_route_list) {
- p = ipddp_route_list->next;
- kfree(ipddp_route_list);
- ipddp_route_list = p;
- }
-}
-
-module_init(ipddp_init_module);
-module_exit(ipddp_cleanup_module);
diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h
deleted file mode 100644
index 9a8e45a46925..000000000000
--- a/drivers/net/appletalk/ipddp.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ipddp.h: Header for IP-over-DDP driver for Linux.
- */
-
-#ifndef __LINUX_IPDDP_H
-#define __LINUX_IPDDP_H
-
-#ifdef __KERNEL__
-
-#define SIOCADDIPDDPRT (SIOCDEVPRIVATE)
-#define SIOCDELIPDDPRT (SIOCDEVPRIVATE+1)
-#define SIOCFINDIPDDPRT (SIOCDEVPRIVATE+2)
-
-struct ipddp_route
-{
- struct net_device *dev; /* Carrier device */
- __be32 ip; /* IP address */
- struct atalk_addr at; /* Gateway appletalk address */
- int flags;
- struct ipddp_route *next;
-};
-
-#define IPDDP_ENCAP 1
-#define IPDDP_DECAP 2
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_IPDDP_H */
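
Since SIOCDEVPRIVATE is 0x89F0 in <linux/sockios.h>, the three ioctls above
land at 0x89F0-0x89F2. struct ipddp_route itself is __KERNEL__-only, so a
userspace caller (historically the MacGate daemon) had to mirror its layout.
A sketch of adding a route from userspace -- the mirrored struct layout here
is an assumption made for illustration, not a published ABI:

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>          /* SIOCDEVPRIVATE */
    #include <net/if.h>                 /* struct ifreq, IFNAMSIZ */

    #define SIOCADDIPDDPRT (SIOCDEVPRIVATE)

    struct atalk_addr_user {            /* assumed mirror of atalk_addr */
            uint16_t s_net;
            uint8_t  s_node;
    };

    struct ipddp_route_user {           /* assumed mirror of ipddp_route */
            void *dev;                  /* ignored on input */
            uint32_t ip;                /* IP address, network byte order */
            struct atalk_addr_user at;  /* gateway AppleTalk address */
            int flags;
            void *next;                 /* kernel list linkage */
    };

    static int ipddp_add_route(const char *ifname,
                               struct ipddp_route_user *rt)
    {
            struct ifreq ifr;
            int fd, err;

            fd = socket(AF_INET, SOCK_DGRAM, 0);
            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)rt;  /* the driver copies this from user */
            err = ioctl(fd, SIOCADDIPDDPRT, &ifr);
            close(fd);
            return err;
    }
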
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
deleted file mode 100644
index 388d7b3bd4c2..000000000000
--- a/drivers/net/appletalk/ltpc.c
+++ /dev/null
@@ -1,1277 +0,0 @@
-/*** ltpc.c -- a driver for the LocalTalk PC card.
- *
- * Copyright (c) 1995,1996 Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This is ALPHA code at best. It may not work for you. It may
- * damage your equipment. It may damage your relations with other
- * users of your network. Use it at your own risk!
- *
- * Based in part on:
- * skeleton.c by Donald Becker
- * dummy.c by Nick Holloway and Alan Cox
- * loopback.c by Ross Biro, Fred van Kampen, Donald Becker
- * the netatalk source code (UMICH)
- * lots of work on the card...
- *
- * I do not have access to the (proprietary) SDK that goes with the card.
- * If you do, I don't want to know about it, and you can probably write
- * a better driver yourself anyway. This does mean that the pieces that
- * talk to the card are guesswork on my part, so use at your own risk!
- *
- * This is my first try at writing Linux networking code, and is also
- * guesswork. Again, use at your own risk! (Although on this part, I'd
- * welcome suggestions)
- *
- * This is a loadable kernel module which seems to work at my site
- * consisting of a 1.2.13 linux box running netatalk 1.3.3, and with
- * the kernel support from 1.3.3b2 including patches routing.patch
- * and ddp.disappears.from.chooser. In order to run it, you will need
- * to patch ddp.c and aarp.c in the kernel, but only a little...
- *
- * I'm fairly confident that while this is arguably badly written, the
- * problems that people experience will be "higher level", that is, with
- * complications in the netatalk code. The driver itself doesn't do
- * anything terribly complicated -- it pretends to be an ether device
- * as far as netatalk is concerned, strips the DDP data out of the ether
- * frame and builds a LLAP packet to send out the card. In the other
- * direction, it receives LLAP frames from the card and builds a fake
- * ether packet that it then tosses up to the networking code. You can
- * argue (correctly) that this is an ugly way to do things, but it
- * requires a minimal amount of fooling with the code in ddp.c and aarp.c.
- *
- * The card will do a lot more than is used here -- I *think* it has the
- * layers up through ATP. Even if you knew how that part works (which I
- * don't) it would be a big job to carve up the kernel ddp code to insert
- * things at a higher level, and probably a bad idea...
- *
- * There are a number of other cards that do LocalTalk on the PC. If
- * nobody finds any insurmountable (at the netatalk level) problems
- * here, this driver should encourage people to put some work into the
- * other cards (some of which I gather are still commercially available)
- * and also to put hooks for LocalTalk into the official ddp code.
- *
- * I welcome comments and suggestions. This is my first try at Linux
- * networking stuff, and there are probably lots of things that I did
- * suboptimally.
- *
- ***/
-
-/***
- *
- * $Log: ltpc.c,v $
- * Revision 1.1.2.1 2000/03/01 05:35:07 jgarzik
- * at and tr cleanup
- *
- * Revision 1.8 1997/01/28 05:44:54 bradford
- * Clean up for non-module a little.
- * Hacked about a bit to clean things up - Alan Cox
- * Probably broke it from the original 1.8
- *
-
- * 1998/11/09: David Huggins-Daines <dhd@debian.org>
- * Cleaned up the initialization code to use the standard autoirq methods,
- and to probe for things in the standard order of i/o, irq, dma. This
- removes the "reset the reset" hack, because I couldn't figure out an
- easy way to get the card to trigger an interrupt after it.
- * Added support for passing configuration parameters on the kernel command
- line and through insmod
- * Changed the device name from "ltalk0" to "lt0", both to conform with the
- other localtalk driver, and to clear up the inconsistency between the
- module and the non-module versions of the driver :-)
- * Added a bunch of comments (I was going to make some enums for the state
- codes and the register offsets, but I'm still not sure exactly what their
- semantics are)
- * Don't poll anymore in interrupt-driven mode
- * It seems to work as a module now (as of 2.1.127), but I don't think
- I'm responsible for that...
-
- *
- * Revision 1.7 1996/12/12 03:42:33 bradford
- * DMA alloc cribbed from 3c505.c.
- *
- * Revision 1.6 1996/12/12 03:18:58 bradford
- * Added virt_to_bus; works in 2.1.13.
- *
- * Revision 1.5 1996/12/12 03:13:22 root
- * xmitQel initialization -- think through better though.
- *
- * Revision 1.4 1996/06/18 14:55:55 root
- * Change names to ltpc. Tabs. Took a shot at dma alloc,
- * although more needs to be done eventually.
- *
- * Revision 1.3 1996/05/22 14:59:39 root
- * Change dev->open, dev->close to track dummy.c in 1.99.(around 7)
- *
- * Revision 1.2 1996/05/22 14:58:24 root
- * Change tabs mostly.
- *
- * Revision 1.1 1996/04/23 04:45:09 root
- * Initial revision
- *
- * Revision 0.16 1996/03/05 15:59:56 root
- * Change ARPHRD_LOCALTLK definition to the "real" one.
- *
- * Revision 0.15 1996/03/05 06:28:30 root
- * Changes for kernel 1.3.70. Still need a few patches to kernel, but
- * it's getting closer.
- *
- * Revision 0.14 1996/02/25 17:38:32 root
- * More cleanups. Removed query to card on get_stats.
- *
- * Revision 0.13 1996/02/21 16:27:40 root
- * Refix debug_print_skb. Fix mac.raw gotcha that appeared in 1.3.65.
- * Clean up receive code a little.
- *
- * Revision 0.12 1996/02/19 16:34:53 root
- * Fix debug_print_skb. Kludge outgoing snet to 0 when using startup
- * range. Change debug to mask: 1 for verbose, 2 for higher level stuff
- * including packet printing, 4 for lower level (card i/o) stuff.
- *
- * Revision 0.11 1996/02/12 15:53:38 root
- * Added router sends (requires new aarp.c patch)
- *
- * Revision 0.10 1996/02/11 00:19:35 root
- * Change source LTALK_LOGGING debug switch to insmod ... debug=2.
- *
- * Revision 0.9 1996/02/10 23:59:35 root
- * Fixed those fixes for 1.2 -- DANGER! The at.h that comes with netatalk
- * has a *different* definition of struct sockaddr_at than the Linux kernel
- * does. This is an "insidious and invidious" bug...
- * (Actually the preceding comment is false -- it's the atalk.h in the
- * ancient atalk-0.06 that's the problem)
- *
- * Revision 0.8 1996/02/10 19:09:00 root
- * Merge 1.3 changes. Tested OK under 1.3.60.
- *
- * Revision 0.7 1996/02/10 17:56:56 root
- * Added debug=1 parameter on insmod for debugging prints. Tried
- * to fix timer unload on rmmod, but I don't think that's the problem.
- *
- * Revision 0.6 1995/12/31 19:01:09 root
- * Clean up rmmod, irq comments per feedback from Corin Anderson (Thanks Corey!)
- * Clean up initial probing -- sometimes the card wakes up latched in reset.
- *
- * Revision 0.5 1995/12/22 06:03:44 root
- * Added comments in front and cleaned up a bit.
- * This version sent out to people.
- *
- * Revision 0.4 1995/12/18 03:46:44 root
- * Return shortDDP to longDDP fake to 0/0. Added command structs.
- *
- ***/
-
-/* ltpc jumpers are:
-*
-* Interrupts -- set at most one. If none are set, the driver uses
-* polled mode. Because the card was developed in the XT era, the
-* original documentation refers to IRQ2. Since you'll be running
-* this on an AT (or later) class machine, that really means IRQ9.
-*
-* SW1 IRQ 4
-* SW2 IRQ 3
-* SW3 IRQ 9 (2 in original card documentation only applies to XT)
-*
-*
-* DMA -- choose DMA 1 or 3, and set both corresponding switches.
-*
-* SW4 DMA 3
-* SW5 DMA 1
-* SW6 DMA 3
-* SW7 DMA 1
-*
-*
-* I/O address -- choose one.
-*
-* SW8 220 / 240
-*/
-
-/* To have some stuff logged, do
-* insmod ltpc.o debug=1
-*
-* For a whole bunch of stuff, use higher numbers.
-*
-* The default is 0, i.e. no messages except for the probe results.
-*/
-
-/* insmod-tweakable variables */
-static int debug;
-#define DEBUG_VERBOSE 1
-#define DEBUG_UPPER 2
-#define DEBUG_LOWER 4
-
-static int io;
-static int irq;
-static int dma;
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/atalk.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <net/Space.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-
-/* our stuff */
-#include "ltpc.h"
-
-static DEFINE_SPINLOCK(txqueue_lock);
-static DEFINE_SPINLOCK(mbox_lock);
-
-/* function prototypes */
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen);
-static int sendup_buffer (struct net_device *dev);
-
-/* Dma Memory related stuff, cribbed directly from 3c505.c */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
-
- return __get_dma_pages(GFP_KERNEL, order);
-}
-
-/* DMA data buffer, DMA command buffer */
-static unsigned char *ltdmabuf;
-static unsigned char *ltdmacbuf;
-
-/* private struct, holds our appletalk address */
-
-struct ltpc_private
-{
- struct atalk_addr my_addr;
-};
-
-/* transmit queue element struct */
-
-struct xmitQel {
- struct xmitQel *next;
- /* command buffer */
- unsigned char *cbuf;
- short cbuflen;
- /* data buffer */
- unsigned char *dbuf;
- short dbuflen;
- unsigned char QWrite; /* read or write data */
- unsigned char mailbox;
-};
-
-/* the transmit queue itself */
-
-static struct xmitQel *xmQhd, *xmQtl;
-
-static void enQ(struct xmitQel *qel)
-{
- unsigned long flags;
- qel->next = NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQtl) {
- xmQtl->next = qel;
- } else {
- xmQhd = qel;
- }
- xmQtl = qel;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if (debug & DEBUG_LOWER)
- printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
-}
-
-static struct xmitQel *deQ(void)
-{
- unsigned long flags;
- int i;
- struct xmitQel *qel=NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQhd) {
- qel = xmQhd;
- xmQhd = qel->next;
- if(!xmQhd) xmQtl = NULL;
- }
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if ((debug & DEBUG_LOWER) && qel) {
- int n;
- printk(KERN_DEBUG "ltpc: dequeued command ");
- n = qel->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
- printk("\n");
- }
-
- return qel;
-}
-
-/* and... the queue elements we'll be using */
-static struct xmitQel qels[16];
-
-/* and their corresponding mailboxes */
-static unsigned char mailbox[16];
-static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static int wait_timeout(struct net_device *dev, int c)
-{
- /* returns true (timed out) if the status byte stayed equal to c */
- /* this uses base+6, but that's ok */
- int i;
-
- /* about twenty seconds total: 200000 iterations x 100us each */
-
- for(i=0;i<200000;i++) {
- if ( c != inb_p(dev->base_addr+6) ) return 0;
- udelay(100);
- }
- return 1; /* timed out */
-}
-
-/* get the first free mailbox */
-
-static int getmbox(void)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&mbox_lock, flags);
- for(i=1;i<16;i++) if(!mboxinuse[i]) {
- mboxinuse[i]=1;
- spin_unlock_irqrestore(&mbox_lock, flags);
- return i;
- }
- spin_unlock_irqrestore(&mbox_lock, flags);
- return 0;
-}
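
Note that getmbox() starts its scan at 1: mailbox 0 is reserved for the
LT_GETRESULT requests that idle() queues internally (the 0xf9 case below), so
callers always receive a slot in 1-15 and the 0 return doubles as "none
free".
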
-
-/* read a command from the card */
-static void handlefc(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
-}
-
-/* read data from the card */
-static void handlefd(struct net_device *dev)
-{
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
- sendup_buffer(dev);
-}
-
-static void handlewrite(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- /* on entry, status is 0xfb and ltdmabuf holds the data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfb) ) {
- flags=claim_dma_lock();
- printk("timed out in handlewrite, dma res %d\n",
- get_dma_residue(dev->dma) );
- release_dma_lock(flags);
- }
-}
-
-static void handleread(struct net_device *dev)
-{
- /* on entry, status is 0xfb */
- /* on exit, ltdmabuf holds data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
-}
-
-static void handlecommand(struct net_device *dev)
-{
- /* on entry, status is 0xfa and ltdmacbuf holds the command */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
-}
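
All four handle* routines above share one shape: program the DMA channel
under the DMA spinlock, read base+3 then base+2 (which evidently tells the
card to start the transfer), then spin in wait_timeout() until the status
byte leaves the state that triggered the handler. Only the direction
(DMA_MODE_READ vs DMA_MODE_WRITE), the buffer (ltdmabuf vs ltdmacbuf) and the
byte count (800 for data, 50 for commands) differ.
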
-
-/* ready made command for getting the result from the card */
-static unsigned char rescbuf[2] = {LT_GETRESULT,0};
-static unsigned char resdbuf[2];
-
-static int QInIdle;
-
-/* idle expects to be called with the IRQ line high -- either because of
- * an interrupt, or because the line is tri-stated
- */
-
-static void idle(struct net_device *dev)
-{
- unsigned long flags;
- int state;
- /* FIXME This is initialized to shut the warning up, but I need to
- * think this through again.
- */
- struct xmitQel *q = NULL;
- int oops;
- int i;
- int base = dev->base_addr;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if(QInIdle) {
- spin_unlock_irqrestore(&txqueue_lock, flags);
- return;
- }
- QInIdle = 1;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- /* this tri-states the IRQ line */
- (void) inb_p(base+6);
-
- oops = 100;
-
-loop:
- if (0>oops--) {
- printk("idle: looped too many times\n");
- goto done;
- }
-
- state = inb_p(base+6);
- if (state != inb_p(base+6)) goto loop;
-
- switch(state) {
- case 0xfc:
- /* incoming command */
- if (debug & DEBUG_LOWER) printk("idle: fc\n");
- handlefc(dev);
- break;
- case 0xfd:
- /* incoming data */
- if(debug & DEBUG_LOWER) printk("idle: fd\n");
- handlefd(dev);
- break;
- case 0xf9:
- /* result ready */
- if (debug & DEBUG_LOWER) printk("idle: f9\n");
- if(!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- }
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if( wait_timeout(dev,0xf9) )
- printk("timed out idle f9\n");
- break;
- case 0xf8:
- /* ?? */
- if (xmQhd) {
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if(wait_timeout(dev,0xf8) )
- printk("timed out idle f8\n");
- } else {
- goto done;
- }
- break;
- case 0xfa:
- /* waiting for command */
- if(debug & DEBUG_LOWER) printk("idle: fa\n");
- if (xmQhd) {
- q=deQ();
- memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
- ltdmacbuf[1] = q->mailbox;
- if (debug>1) {
- int n;
- printk("ltpc: sent command ");
- n = q->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++)
- printk("%02x ",ltdmacbuf[i]);
- printk("\n");
- }
-
- handlecommand(dev);
-
- if (0xfa == inb_p(base + 6)) {
- /* we timed out, so return */
- goto done;
- }
- } else {
- /* we don't seem to have a command */
- if (!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- } else {
- printk("trouble: response command already queued\n");
- goto done;
- }
- }
- break;
- case 0xfb:
- /* data transfer ready */
- if(debug & DEBUG_LOWER) printk("idle: fb\n");
- if(q->QWrite) {
- memcpy(ltdmabuf,q->dbuf,q->dbuflen);
- handlewrite(dev);
- } else {
- handleread(dev);
- /* non-zero mailbox numbers are for
- commands, 0 is for GETRESULT
- requests */
- if(q->mailbox) {
- memcpy(q->dbuf,ltdmabuf,q->dbuflen);
- } else {
- /* this was a result */
- mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
- mboxinuse[0]=0;
- }
- }
- break;
- }
- goto loop;
-
-done:
- QInIdle=0;
-
- /* now set the interrupts back as appropriate */
- /* the first read takes it out of tri-state (but still high) */
- /* the second resets it */
- /* note that after this point, any read of base+6 will
- trigger an interrupt */
-
- if (dev->irq) {
- inb_p(base+7);
- inb_p(base+7);
- }
-}
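
idle() is effectively a polling state machine over the status byte at base+6.
Collecting the cases above into one place -- the names are invented here for
readability, since (as the comments at the top of the file say) the card
protocol is guesswork:

    /* Status values seen at base+6, as handled by idle(). */
    enum ltpc_card_state {
            LTPC_INCOMING_CMD  = 0xfc, /* card has a command for the host */
            LTPC_INCOMING_DATA = 0xfd, /* card has data for the host */
            LTPC_RESULT_READY  = 0xf9, /* queue an LT_GETRESULT */
            LTPC_UNKNOWN_F8    = 0xf8, /* undocumented; poked if work queued */
            LTPC_WANTS_CMD     = 0xfa, /* card waits for a command */
            LTPC_WANTS_DATA    = 0xfb, /* card waits for a data transfer */
    };
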
-
-
-static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 1;
- qels[i].mailbox = i; /* this should really be initialized elsewhere */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 0;
- qels[i].mailbox = i; /* this should really be initialized elsewhere */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-/* end of idle handlers -- what should be seen is do_read, do_write */
-
-static struct timer_list ltpc_timer;
-static struct net_device *ltpc_timer_dev;
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
-
-static int read_30 ( struct net_device *dev)
-{
- lt_command c;
- c.getflags.command = LT_GETFLAGS;
- return do_read(dev, &c, sizeof(c.getflags),&c,0);
-}
-
-static int set_30 (struct net_device *dev,int x)
-{
- lt_command c;
- c.setflags.command = LT_SETFLAGS;
- c.setflags.flags = x;
- return do_write(dev, &c, sizeof(c.setflags),&c,0);
-}
-
-/* LLAP to DDP translation */
-
-static int sendup_buffer (struct net_device *dev)
-{
- /* on entry, command is in ltdmacbuf, data in ltdmabuf */
- /* called from idle, non-reentrant */
-
- int dnode, snode, llaptype, len;
- int sklen;
- struct sk_buff *skb;
- struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
-
- if (ltc->command != LT_RCVLAP) {
- printk("unknown command 0x%02x from ltpc card\n",ltc->command);
- return -1;
- }
- dnode = ltc->dnode;
- snode = ltc->snode;
- llaptype = ltc->laptype;
- len = ltc->length;
-
- sklen = len;
- if (llaptype == 1)
- sklen += 8; /* correct for short ddp */
- if(sklen > 800) {
- printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
- dev->name,sklen);
- return -1;
- }
-
- if ( (llaptype==0) || (llaptype>2) ) {
- printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
- return -1;
- }
-
-
- skb = dev_alloc_skb(3+sklen);
- if (skb == NULL)
- {
- printk("%s: dropping packet due to memory squeeze.\n",
- dev->name);
- return -1;
- }
- skb->dev = dev;
-
- if (sklen > len)
- skb_reserve(skb,8);
- skb_put(skb,len+3);
- skb->protocol = htons(ETH_P_LOCALTALK);
- /* add LLAP header */
- skb->data[0] = dnode;
- skb->data[1] = snode;
- skb->data[2] = llaptype;
- skb_reset_mac_header(skb); /* save pointer to llap header */
- skb_pull(skb,3);
-
- /* copy ddp(s,e)hdr + contents */
- skb_copy_to_linear_data(skb, ltdmabuf, len);
-
- skb_reset_transport_header(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* toss it onwards */
- netif_rx(skb);
- return 0;
-}
-
-/* the handler for the board interrupt */
-
-static irqreturn_t
-ltpc_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- if (dev==NULL) {
- printk("ltpc_interrupt: unknown device.\n");
- return IRQ_NONE;
- }
-
- inb_p(dev->base_addr+6); /* disable further interrupts from board */
-
- idle(dev); /* handle whatever is coming in */
-
- /* idle re-enables interrupts from board */
-
- return IRQ_HANDLED;
-}
-
-/***
- *
- * The ioctls that the driver responds to are:
- *
- * SIOCSIFADDR -- do probe using the passed node hint.
- * SIOCGIFADDR -- return net, node.
- *
- * some of this stuff should be done elsewhere.
- *
- ***/
-
-static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
- /* we'll keep the localtalk node address in dev->pa_addr */
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct atalk_addr *aa = &ltpc_priv->my_addr;
- struct lt_init c;
- int ltflags;
-
- if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");
-
- switch(cmd) {
- case SIOCSIFADDR:
-
- aa->s_net = sa->sat_addr.s_net;
-
- /* this does the probe and returns the node addr */
- c.command = LT_INIT;
- c.hint = sa->sat_addr.s_node;
-
- aa->s_node = do_read(dev,&c,sizeof(c),&c,0);
-
- /* get all llap frames raw */
- ltflags = read_30(dev);
- ltflags |= LT_FLAG_ALLLAP;
- set_30 (dev,ltflags);
-
- dev->broadcast[0] = 0xFF;
- dev->addr_len=1;
- dev_addr_set(dev, &aa->s_node);
-
- return 0;
-
- case SIOCGIFADDR:
-
- sa->sat_addr.s_net = aa->s_net;
- sa->sat_addr.s_node = aa->s_node;
-
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
- /* This needs to be present to keep netatalk happy. */
- /* Actually netatalk needs fixing! */
-}
-
-static int ltpc_poll_counter;
-
-static void ltpc_poll(struct timer_list *unused)
-{
- del_timer(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) {
- if (!ltpc_poll_counter) {
- ltpc_poll_counter = 50;
- printk("ltpc poll is alive\n");
- }
- ltpc_poll_counter--;
- }
-
- /* poll 20 times per second */
- idle(ltpc_timer_dev);
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
-}
-
-/* DDP to LLAP translation */
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- /* in kernel 1.3.xx, on entry skb->data points to ddp header,
- * and skb->len is the length of the ddp data + ddp header
- */
- int i;
- struct lt_sendlap cbuf;
- unsigned char *hdr;
-
- cbuf.command = LT_SENDLAP;
- cbuf.dnode = skb->data[0];
- cbuf.laptype = skb->data[2];
- skb_pull(skb,3); /* skip past LLAP header */
- cbuf.length = skb->len; /* this is host order */
- skb_reset_transport_header(skb);
-
- if(debug & DEBUG_UPPER) {
- printk("command ");
- for(i=0;i<6;i++)
- printk("%02x ",((unsigned char *)&cbuf)[i]);
- printk("\n");
- }
-
- hdr = skb_transport_header(skb);
- do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
-
- if(debug & DEBUG_UPPER) {
- printk("sent %d ddp bytes\n",skb->len);
- for (i = 0; i < skb->len; i++)
- printk("%02x ", hdr[i]);
- printk("\n");
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* initialization stuff */
-
-static int __init ltpc_probe_dma(int base, int dma)
-{
- int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
- unsigned long timeout;
- unsigned long f;
-
- if (want & 1) {
- if (request_dma(1,"ltpc")) {
- want &= ~1;
- } else {
- f=claim_dma_lock();
- disable_dma(1);
- clear_dma_ff(1);
- set_dma_mode(1,DMA_MODE_WRITE);
- set_dma_addr(1,virt_to_bus(ltdmabuf));
- set_dma_count(1,sizeof(struct lt_mem));
- enable_dma(1);
- release_dma_lock(f);
- }
- }
- if (want & 2) {
- if (request_dma(3,"ltpc")) {
- want &= ~2;
- } else {
- f=claim_dma_lock();
- disable_dma(3);
- clear_dma_ff(3);
- set_dma_mode(3,DMA_MODE_WRITE);
- set_dma_addr(3,virt_to_bus(ltdmabuf));
- set_dma_count(3,sizeof(struct lt_mem));
- enable_dma(3);
- release_dma_lock(f);
- }
- }
- /* set up request */
-
- /* FIXME -- do timings better! */
-
- ltdmabuf[0] = LT_READMEM;
- ltdmabuf[1] = 1; /* mailbox */
- ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */
- ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */
- ltdmabuf[6] = 0; /* dunno if this is necessary */
-
- inb_p(io+1);
- inb_p(io+0);
- timeout = jiffies+100*HZ/100;
- while(time_before(jiffies, timeout)) {
- if ( 0xfa == inb_p(io+6) ) break;
- }
-
- inb_p(io+3);
- inb_p(io+2);
- while(time_before(jiffies, timeout)) {
- if ( 0xfb == inb_p(io+6) ) break;
- }
-
- /* release the other dma channel (if we opened both of them) */
-
- if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
- want &= ~2;
- free_dma(3);
- }
-
- if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
- want &= ~1;
- free_dma(1);
- }
-
- if (!want)
- return 0;
-
- return (want & 2) ? 3 : 1;
-}
-
-static const struct net_device_ops ltpc_netdev = {
- .ndo_start_xmit = ltpc_xmit,
- .ndo_do_ioctl = ltpc_ioctl,
- .ndo_set_rx_mode = set_multicast_list,
-};
-
-static struct net_device * __init ltpc_probe(void)
-{
- struct net_device *dev;
- int err = -ENOMEM;
- int x=0,y=0;
- int autoirq;
- unsigned long f;
- unsigned long timeout;
-
- dev = alloc_ltalkdev(sizeof(struct ltpc_private));
- if (!dev)
- goto out;
-
- /* probe for the I/O port address */
-
- if (io != 0x240 && request_region(0x220,8,"ltpc")) {
- x = inb_p(0x220+6);
- if ( (x!=0xff) && (x>=0xf0) ) {
- io = 0x220;
- goto got_port;
- }
- release_region(0x220,8);
- }
- if (io != 0x220 && request_region(0x240,8,"ltpc")) {
- y = inb_p(0x240+6);
- if ( (y!=0xff) && (y>=0xf0) ){
- io = 0x240;
- goto got_port;
- }
- release_region(0x240,8);
- }
-
- /* give up in despair */
- printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
- err = -ENODEV;
- goto out1;
-
- got_port:
- /* probe for the IRQ line */
- if (irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- /* reset the interrupt line */
- inb_p(io+7);
- inb_p(io+7);
- /* trigger an interrupt (I hope) */
- inb_p(io+6);
- mdelay(2);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
- } else {
- irq = autoirq;
- }
- }
-
- /* allocate a DMA buffer */
- ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
- if (!ltdmabuf) {
- printk(KERN_ERR "ltpc: mem alloc failed\n");
- err = -ENOMEM;
- goto out2;
- }
-
- ltdmacbuf = &ltdmabuf[800];
-
- if(debug & DEBUG_VERBOSE) {
- printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
- }
-
- /* reset the card */
-
- inb_p(io+1);
- inb_p(io+3);
-
- msleep(20);
-
- inb_p(io+0);
- inb_p(io+2);
- inb_p(io+7); /* clear reset */
- inb_p(io+4);
- inb_p(io+5);
- inb_p(io+5); /* enable dma */
- inb_p(io+6); /* tri-state interrupt line */
-
- ssleep(1);
-
- /* now, figure out which dma channel we're using, unless it's
- already been specified */
- /* well, 0 is a legal DMA channel, but the LTPC card doesn't
- use it... */
- dma = ltpc_probe_dma(io, dma);
- if (!dma) { /* no dma channel */
- printk(KERN_ERR "No DMA channel found on ltpc card.\n");
- err = -ENODEV;
- goto out3;
- }
-
- /* print out friendly message */
- if(irq)
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
- else
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
-
- dev->netdev_ops = &ltpc_netdev;
- dev->base_addr = io;
- dev->irq = irq;
- dev->dma = dma;
-
- /* the card will want to send a result at this point */
- /* (I think... leaving out this part makes the kernel crash,
- so I put it back in...) */
-
- f=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,0x100);
- enable_dma(dma);
- release_dma_lock(f);
-
- (void) inb_p(io+3);
- (void) inb_p(io+2);
- timeout = jiffies+100*HZ/100;
-
- while(time_before(jiffies, timeout)) {
- if( 0xf9 == inb_p(io+6))
- break;
- schedule();
- }
-
- if(debug & DEBUG_VERBOSE) {
- printk("setting up timer and irq\n");
- }
-
- /* grab it and don't let go :-) */
- if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
- {
- (void) inb_p(io+7); /* enable interrupts from board */
- (void) inb_p(io+7); /* and reset irq line */
- } else {
- if( irq )
- printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
- dev->irq = 0;
- /* polled mode -- 20 times per second */
- /* this is really, really slow... should it poll more often? */
- ltpc_timer_dev = dev;
- timer_setup(&ltpc_timer, ltpc_poll, 0);
-
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
- }
- err = register_netdev(dev);
- if (err)
- goto out4;
-
- return NULL;
-out4:
- del_timer_sync(&ltpc_timer);
- if (dev->irq)
- free_irq(dev->irq, dev);
-out3:
- free_pages((unsigned long)ltdmabuf, get_order(1000));
-out2:
- release_region(io, 8);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-#ifndef MODULE
-/* handles "ltpc=io,irq,dma" kernel command lines */
-static int __init ltpc_setup(char *str)
-{
- int ints[5];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- if (ints[0] == 0) {
- if (str && !strncmp(str, "auto", 4)) {
- /* do nothing :-) */
- }
- else {
- /* usage message */
- printk (KERN_ERR
- "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
- return 0;
- }
- } else {
- io = ints[1];
- if (ints[0] > 1) {
- irq = ints[2];
- }
- if (ints[0] > 2) {
- dma = ints[3];
- }
- /* ignore any other parameters */
- }
- return 1;
-}
-
-__setup("ltpc=", ltpc_setup);
-#endif
-
-static struct net_device *dev_ltpc;
-
-MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(dma, int, dma, 0);
-
-
-static int __init ltpc_module_init(void)
-{
- if(io == 0)
- printk(KERN_NOTICE
- "ltpc: Autoprobing is not recommended for modules\n");
-
- dev_ltpc = ltpc_probe();
- return PTR_ERR_OR_ZERO(dev_ltpc);
-}
-module_init(ltpc_module_init);
-
-static void __exit ltpc_cleanup(void)
-{
-
- if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
- unregister_netdev(dev_ltpc);
-
- del_timer_sync(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) printk("freeing irq\n");
-
- if (dev_ltpc->irq)
- free_irq(dev_ltpc->irq, dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("freeing dma\n");
-
- if (dev_ltpc->dma)
- free_dma(dev_ltpc->dma);
-
- if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");
-
- if (dev_ltpc->base_addr)
- release_region(dev_ltpc->base_addr,8);
-
- free_netdev(dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("free_pages\n");
-
- free_pages( (unsigned long) ltdmabuf, get_order(1000));
-
- if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
-}
-
-module_exit(ltpc_cleanup);
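
For context on what this removal retires: sendup_buffer() above implemented the classic LLAP-to-DDP receive path. It allocates an skb, leaves 8 bytes of headroom when the frame carries a short DDP header, prepends the 3-byte LLAP header, then pulls it so skb->data lands on the DDP header. A minimal sketch of that idiom follows; llap_rx_demo() and its parameters are hypothetical, while the skb and netdev helpers are the real APIs used above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int llap_rx_demo(struct net_device *dev, const u8 *payload, int len,
			u8 dnode, u8 snode, u8 laptype)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(3 + len + 8);	/* LLAP header + worst-case headroom */
	if (!skb)
		return -ENOMEM;

	skb->dev = dev;
	if (laptype == 1)			/* short DDP: keep 8 bytes of headroom */
		skb_reserve(skb, 8);
	skb_put(skb, len + 3);
	skb->protocol = htons(ETH_P_LOCALTALK);
	skb->data[0] = dnode;			/* 3-byte LLAP header */
	skb->data[1] = snode;
	skb->data[2] = laptype;
	skb_reset_mac_header(skb);		/* remember where LLAP starts */
	skb_pull(skb, 3);			/* skb->data now at the DDP header */
	skb_copy_to_linear_data(skb, payload, len);
	skb_reset_transport_header(skb);

	return netif_rx(skb);
}
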
diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h
deleted file mode 100644
index 58cf945732a4..000000000000
--- a/drivers/net/appletalk/ltpc.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*** ltpc.h
- *
- *
- ***/
-
-#define LT_GETRESULT 0x00
-#define LT_WRITEMEM 0x01
-#define LT_READMEM 0x02
-#define LT_GETFLAGS 0x04
-#define LT_SETFLAGS 0x05
-#define LT_INIT 0x10
-#define LT_SENDLAP 0x13
-#define LT_RCVLAP 0x14
-
-/* the flag that we care about */
-#define LT_FLAG_ALLLAP 0x04
-
-struct lt_getresult {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_mem {
- unsigned char command;
- unsigned char mailbox;
- unsigned short addr; /* host order */
- unsigned short length; /* host order */
-};
-
-struct lt_setflags {
- unsigned char command;
- unsigned char mailbox;
- unsigned char flags;
-};
-
-struct lt_getflags {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_init {
- unsigned char command;
- unsigned char mailbox;
- unsigned char hint;
-};
-
-struct lt_sendlap {
- unsigned char command;
- unsigned char mailbox;
- unsigned char dnode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-struct lt_rcvlap {
- unsigned char command;
- unsigned char dnode;
- unsigned char snode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-union lt_command {
- struct lt_getresult getresult;
- struct lt_mem mem;
- struct lt_setflags setflags;
- struct lt_getflags getflags;
- struct lt_init init;
- struct lt_sendlap sendlap;
- struct lt_rcvlap rcvlap;
-};
-typedef union lt_command lt_command;
-
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a51b9dab6d3a..d1d07a1d4fbc 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig ARCNET
- depends on NETDEVICES && (ISA || PCI || PCMCIA)
+ depends on NETDEVICES && (ISA || PCI || PCMCIA) && HAS_IOPORT
tristate "ARCnet support"
help
If you have a network card of this type, say Y and check out the
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8c651fdee039..57f1729066f2 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void)
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
+MODULE_DESCRIPTION("ARCnet raw mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c3ccc7c83cd..53d10a04d1bd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -312,6 +312,7 @@ module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 19e996a829c9..bee60b377d7c 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -16,6 +16,7 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
/*
* RECON_THRESHOLD is the maximum number of RECON messages to receive
@@ -186,6 +187,8 @@ do { \
#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
but default is 2.5MBit. */
+#define ARC_HAS_LED 4 /* card has software controlled LEDs */
+#define ARC_HAS_ROTARY 8 /* card has rotary encoder */
/* information needed to define an encapsulation driver */
struct ArcProto {
@@ -266,7 +269,7 @@ struct arcnet_local {
struct net_device *dev;
int reply_status;
- struct tasklet_struct reply_tasklet;
+ struct work_struct reply_work;
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 1bad1866ae46..882972604c82 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -54,6 +54,7 @@
#include <linux/errqueue.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "arcdevice.h"
#include "com9026.h"
@@ -108,6 +109,7 @@ static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
+MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");
static int __init arcnet_init(void)
@@ -196,13 +198,10 @@ static void arcnet_dump_packet(struct net_device *dev, int bufnum,
void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
{
struct arcnet_local *lp = netdev_priv(dev);
- unsigned long led_delay = 350;
- unsigned long tx_delay = 50;
switch (event) {
case ARCNET_LED_EVENT_RECON:
- led_trigger_blink_oneshot(lp->recon_led_trig,
- &led_delay, &led_delay, 0);
+ led_trigger_blink_oneshot(lp->recon_led_trig, 350, 350, 0);
break;
case ARCNET_LED_EVENT_OPEN:
led_trigger_event(lp->tx_led_trig, LED_OFF);
@@ -213,8 +212,7 @@ void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
led_trigger_event(lp->recon_led_trig, LED_OFF);
break;
case ARCNET_LED_EVENT_TX:
- led_trigger_blink_oneshot(lp->tx_led_trig,
- &tx_delay, &tx_delay, 0);
+ led_trigger_blink_oneshot(lp->tx_led_trig, 50, 50, 0);
break;
}
}
@@ -384,7 +382,7 @@ static void arcdev_setup(struct net_device *dev)
static void arcnet_timer(struct timer_list *t)
{
- struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct arcnet_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
spin_lock_irq(&lp->lock);
@@ -427,9 +425,9 @@ out:
rtnl_unlock();
}
-static void arcnet_reply_tasklet(struct tasklet_struct *t)
+static void arcnet_reply_work(struct work_struct *t)
{
- struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet);
+ struct arcnet_local *lp = from_work(lp, t, reply_work);
struct sk_buff *ackskb, *skb;
struct sock_exterr_skb *serr;
@@ -468,7 +466,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t)
ret = sock_queue_err_skb(sk, ackskb);
if (ret)
- kfree_skb(ackskb);
+ dev_kfree_skb_irq(ackskb);
local_irq_enable();
};
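
The kfree_skb() to dev_kfree_skb_irq() switch above matters because the surrounding code runs with hardirqs disabled: dev_kfree_skb_irq() is the irq-safe variant that only queues the skb on the per-CPU completion list and defers the actual free to softirq context. Reduced to a sketch, with demo_queue_err() as a hypothetical stand-in for the surrounding function:

#include <linux/skbuff.h>
#include <net/sock.h>

static void demo_queue_err(struct sock *sk, struct sk_buff *ackskb)
{
	local_irq_disable();
	if (sock_queue_err_skb(sk, ackskb))
		dev_kfree_skb_irq(ackskb);	/* irq-safe: free is deferred to softirq */
	local_irq_enable();
}
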
@@ -530,7 +528,7 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
- tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet);
+ INIT_WORK(&lp->reply_work, arcnet_reply_work);
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
@@ -618,12 +616,12 @@ int arcnet_close(struct net_device *dev)
struct arcnet_local *lp = netdev_priv(dev);
arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
- del_timer_sync(&lp->timer);
+ timer_delete_sync(&lp->timer);
netif_stop_queue(dev);
netif_carrier_off(dev);
- tasklet_kill(&lp->reply_tasklet);
+ cancel_work_sync(&lp->reply_work);
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
@@ -987,7 +985,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
->ack_tx(dev, ackstatus);
}
lp->reply_status = ackstatus;
- tasklet_hi_schedule(&lp->reply_tasklet);
+ queue_work(system_bh_highpri_wq, &lp->reply_work);
}
if (lp->cur_tx != -1)
release_arcbuf(dev, lp->cur_tx);
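
Taken together, the arcnet.c hunks are the stock tasklet-to-BH-workqueue conversion: tasklet_setup() becomes INIT_WORK(), tasklet_hi_schedule() becomes queue_work() on system_bh_highpri_wq, and tasklet_kill() becomes cancel_work_sync(). The pattern in isolation, assuming a kernel that provides from_work() and the BH workqueues, which this diff already relies on:

#include <linux/workqueue.h>

struct demo_priv {				/* stand-in for struct arcnet_local */
	struct work_struct reply_work;
};

static void demo_reply_work(struct work_struct *t)
{
	struct demo_priv *lp = from_work(lp, t, reply_work);

	/* runs in softirq-like context, matching the old tasklet semantics */
	(void)lp;
}

static void demo_open(struct demo_priv *lp)
{
	INIT_WORK(&lp->reply_work, demo_reply_work);	/* was tasklet_setup() */
}

static void demo_irq(struct demo_priv *lp)
{
	queue_work(system_bh_highpri_wq, &lp->reply_work); /* was tasklet_hi_schedule() */
}

static void demo_close(struct demo_priv *lp)
{
	cancel_work_sync(&lp->reply_work);		/* was tasklet_kill() */
}
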
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index c09b567845e1..7a0a79973769 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void)
module_init(capmode_module_init);
module_exit(capmode_module_exit);
+MODULE_DESCRIPTION("ARCnet CAP mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 293a621e654c..fef2ac2852a8 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -137,6 +137,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset ISA driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..0472bcdff130 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,6 +61,7 @@ module_param(timeout, int, 0);
module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver");
MODULE_LICENSE("GPL");
static void led_tx_set(struct led_classdev *led_cdev,
@@ -138,6 +139,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
@@ -210,12 +214,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
lp->backplane = 1;
- /* Get the dev_id from the PLX rotary coder */
- if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
- dev_id_mask = 0x3;
- dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
-
- snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ if (ci->flags & ARC_HAS_ROTARY) {
+ /* Get the dev_id from the PLX rotary coder */
+ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+ dev_id_mask = 0x3;
+ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ }
if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
pr_err("IO address %Xh is empty!\n", ioaddr);
@@ -227,6 +232,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
goto err_free_arcdev;
}
+ ret = com20020_found(dev, IRQF_SHARED);
+ if (ret)
+ goto err_free_arcdev;
+
card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
GFP_KERNEL);
if (!card) {
@@ -236,41 +245,54 @@ static int com20020pci_probe(struct pci_dev *pdev,
card->index = i;
card->pci_priv = priv;
- card->tx_led.brightness_set = led_tx_set;
- card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-tx",
- dev->dev_id, i);
- card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:green:tx:%d-%d",
- dev->dev_id, i);
-
- card->tx_led.dev = &dev->dev;
- card->recon_led.brightness_set = led_recon_set;
- card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-recon",
- dev->dev_id, i);
- card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:red:recon:%d-%d",
- dev->dev_id, i);
- card->recon_led.dev = &dev->dev;
- card->dev = dev;
-
- ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
- if (ret)
- goto err_free_arcdev;
-
- ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
- if (ret)
- goto err_free_arcdev;
-
- dev_set_drvdata(&dev->dev, card);
-
- ret = com20020_found(dev, IRQF_SHARED);
- if (ret)
- goto err_free_arcdev;
- devm_arcnet_led_init(dev, dev->dev_id, i);
+ if (ci->flags & ARC_HAS_LED) {
+ card->tx_led.brightness_set = led_tx_set;
+ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-tx",
+ dev->dev_id, i);
+ if (!card->tx_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
+ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:green:tx:%d-%d",
+ dev->dev_id, i);
+ if (!card->tx_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
+ card->tx_led.dev = &dev->dev;
+ card->recon_led.brightness_set = led_recon_set;
+ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-recon",
+ dev->dev_id, i);
+ if (!card->recon_led.default_trigger) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
+ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:red:recon:%d-%d",
+ dev->dev_id, i);
+ if (!card->recon_led.name) {
+ ret = -ENOMEM;
+ goto err_free_arcdev;
+ }
+ card->recon_led.dev = &dev->dev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ dev_set_drvdata(&dev->dev, card);
+ devm_arcnet_led_init(dev, dev->dev_id, i);
+ }
+ card->dev = dev;
list_add(&card->list, &priv->list_dev);
continue;
@@ -326,7 +348,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
};
static struct com20020_pci_card_info card_info_sohard = {
- .name = "PLX-PCI",
+ .name = "SOHARD SH ARC-PCI",
.devcount = 1,
/* SOHARD needs PCI base addr 4 */
.chan_map_tbl = {
@@ -361,7 +383,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_ma1 = {
@@ -393,7 +415,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_fb2 = {
@@ -418,7 +440,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static const struct pci_device_id com20020pci_id_table[] = {
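
Besides gating the LED setup on ARC_HAS_LED, the rewritten block adds the error handling devm_kasprintf() always needs: it returns NULL on allocation failure, and handing a NULL name to LED registration is not safe. The check-and-register pattern on its own, with demo names as placeholders:

#include <linux/device.h>
#include <linux/leds.h>

static int demo_register_led(struct device *dev, struct led_classdev *led,
			     int id, int idx)
{
	led->name = devm_kasprintf(dev, GFP_KERNEL, "pci:green:tx:%d-%d",
				   id, idx);
	if (!led->name)			/* devm_kasprintf() returns NULL on OOM */
		return -ENOMEM;

	return devm_led_classdev_register(dev, led);
}
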
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 06e1651b594b..a0053e3992a3 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
+MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 24150c933fcb..75f08aa7528b 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -97,6 +97,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
@@ -113,6 +114,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
struct com20020_dev *info;
struct net_device *dev;
struct arcnet_local *lp;
+ int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@@ -142,12 +144,18 @@ static int com20020_probe(struct pcmcia_device *p_dev)
info->dev = dev;
p_dev->priv = info;
- return com20020_config(p_dev);
+ ret = com20020_config(p_dev);
+ if (ret)
+ goto fail_config;
+
+ return 0;
+fail_config:
+ free_arcdev(dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
- return -ENOMEM;
+ return ret;
} /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link)
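
The probe fix above is the usual reverse-order unwind: a com20020_config() failure previously leaked the freshly allocated net_device and returned a hardcoded -ENOMEM. A skeleton of the idiom with the driver specifics stripped; demo_probe(), demo_config() and struct demo_info are illustrative only, while alloc_arcdev()/free_arcdev() are the real helpers this driver uses:

struct demo_info { int dummy; };

static int demo_config(struct net_device *dev)
{
	return 0;			/* stands in for com20020_config() */
}

static int demo_probe(void)
{
	struct demo_info *info;
	struct net_device *dev;
	int ret = -ENOMEM;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		goto fail_alloc_info;

	dev = alloc_arcdev(NULL);
	if (!dev)
		goto fail_alloc_dev;

	ret = demo_config(dev);
	if (ret)
		goto fail_config;

	return 0;

fail_config:
	free_arcdev(dev);		/* undo in reverse allocation order */
fail_alloc_dev:
	kfree(info);
fail_alloc_info:
	return ret;			/* propagate the real error, not -ENOMEM */
}
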
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 37b47749fc8b..3b463fbc6402 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f49dae194284..b3b287c16561 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
+MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver");
MODULE_LICENSE("GPL");
static int __init com90xx_init(void)
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index a7752a5b647f..46519ca63a0a 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void)
module_init(arcnet_rfc1051_init);
module_exit(arcnet_rfc1051_exit);
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module");
MODULE_LICENSE("GPL");
/* Determine a packet's protocol ID.
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index a4c856282674..0edf35d971c5 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -35,6 +35,7 @@
#include "arcdevice.h"
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module");
MODULE_LICENSE("GPL");
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 54e321a695ce..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -38,6 +38,13 @@ struct bareudp_net {
struct list_head bareudp_list;
};
+struct bareudp_conf {
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+};
+
/* Pseudo network device */
struct bareudp_dev {
struct net *net; /* netns for packet i/o */
@@ -54,12 +61,14 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
+ IP_TUNNEL_DECLARE_FLAGS(key) = { };
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
__be16 proto;
void *oiph;
int err;
+ int nh;
bareudp = rcu_dereference_sk_user_data(sk);
if (!bareudp)
@@ -75,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -85,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -99,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -115,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -127,28 +136,46 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
- tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+
+ __set_bit(IP_TUNNEL_KEY_BIT, key);
+
+ tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- bareudp->dev->stats.rx_dropped++;
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
skb->dev = bareudp->dev;
- oiph = skb_network_header(skb);
- skb_reset_network_header(skb);
skb_reset_mac_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
+ skb_reset_network_header(skb);
+
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(bareudp->dev, rx_length_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -158,8 +185,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
&((struct ipv6hdr *)oiph)->saddr);
}
if (err > 1) {
- ++bareudp->dev->stats.rx_frame_errors;
- ++bareudp->dev->stats.rx_errors;
+ DEV_STATS_INC(bareudp->dev, rx_frame_errors);
+ DEV_STATS_INC(bareudp->dev, rx_errors);
goto drop;
}
}
@@ -167,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
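
The hunk above fixes a subtle use-after-free: pskb_inet_may_pull() may reallocate skb->head, so a pointer obtained from skb_network_header() beforehand can dangle. Saving the outer header as an offset and recomputing the pointer after the pull is the standard cure. Reduced to a sketch, with demo_decap() as a hypothetical wrapper around the real helpers:

#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>

static int demo_decap(struct sk_buff *skb, unsigned short family)
{
	int nh = skb_network_header(skb) - skb->head;	/* offset survives realloc */
	void *oiph;

	skb_reset_network_header(skb);			/* now points at inner header */
	if (!pskb_inet_may_pull(skb))			/* may move skb->head */
		return -EINVAL;

	oiph = skb->head + nh;				/* recompute, never reuse */
	if (family == AF_INET)
		return IP_ECN_decapsulate(oiph, skb);
	return IP6_ECN_decapsulate(oiph, skb);
}
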
@@ -187,15 +214,10 @@ static int bareudp_init(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&bareudp->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
+
return 0;
}
@@ -204,7 +226,6 @@ static void bareudp_uninit(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
gro_cells_destroy(&bareudp->gro_cells);
- free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
@@ -214,11 +235,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -283,10 +305,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
__be16 sport, df;
@@ -295,11 +317,19 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be32 saddr;
int err;
+ if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
- rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
- IPPROTO_UDP, use_cache);
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr, &info->key,
+ sport, bareudp->port, key->tos,
+ use_cache ?
+ (struct dst_cache *)&info->dst_cache : NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -307,12 +337,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
skb_tunnel_check_pmtu(skb, &rt->dst,
BAREUDP_IPV4_HLEN + info->options_len, false);
- sport = udp_flow_src_port(bareudp->net, skb,
- bareudp->sport_min, USHRT_MAX,
- true);
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
skb_scrub_packet(skb, xnet);
err = -ENOSPC;
@@ -334,7 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -346,10 +375,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
struct in6_addr saddr, daddr;
@@ -358,20 +387,25 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
+ if (skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+ return -EINVAL;
+
if (!sock)
return -ESHUTDOWN;
- dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
- IPPROTO_UDP, use_cache);
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
+ key, sport, bareudp->port, key->tos,
+ use_cache ?
+ (struct dst_cache *) &info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
false);
- sport = udp_flow_src_port(bareudp->net, skb,
- bareudp->sport_min, USHRT_MAX,
- true);
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
@@ -396,7 +430,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -441,7 +477,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -454,11 +490,11 @@ tx_error:
dev_kfree_skb(skb);
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK;
}
@@ -468,15 +504,21 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct bareudp_dev *bareudp = netdev_priv(dev);
bool use_cache;
+ __be16 sport;
use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
- rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
- info, IPPROTO_UDP, use_cache);
+ rt = udp_tunnel_dst_lookup(skb, dev, bareudp->net, 0, &saddr,
+ &info->key, sport, bareudp->port,
+ info->key.tos,
+ use_cache ? &info->dst_cache : NULL);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -487,9 +529,10 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
struct in6_addr saddr;
struct socket *sock = rcu_dereference(bareudp->sock);
- dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
- &saddr, info, IPPROTO_UDP,
- use_cache);
+ dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
+ 0, &saddr, &info->key,
+ sport, bareudp->port, info->key.tos,
+ use_cache ? &info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -499,9 +542,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
return -EINVAL;
}
- info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
- bareudp->sport_min,
- USHRT_MAX, true);
+ info->key.tp_src = sport;
info->key.tp_dst = bareudp->port;
return 0;
}
@@ -512,7 +553,6 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -536,7 +576,6 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_RXCSUM;
- dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
dev->hw_features |= NETIF_F_RXCSUM;
@@ -549,7 +588,9 @@ static void bareudp_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -577,11 +618,8 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
return -EINVAL;
}
- if (data[IFLA_BAREUDP_PORT])
- conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
-
- if (data[IFLA_BAREUDP_ETHERTYPE])
- conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+ conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+ conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
if (data[IFLA_BAREUDP_SRCPORT_MIN])
conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
@@ -605,7 +643,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
}
static int bareudp_configure(struct net *net, struct net_device *dev,
- struct bareudp_conf *conf)
+ struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *t, *bareudp = netdev_priv(dev);
@@ -614,13 +653,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev,
bareudp->net = net;
bareudp->dev = dev;
t = bareudp_find_dev(bn, conf);
- if (t)
+ if (t) {
+ NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
return -EBUSY;
+ }
if (conf->multi_proto_mode &&
(conf->ethertype != htons(ETH_P_MPLS_UC) &&
- conf->ethertype != htons(ETH_P_IP)))
+ conf->ethertype != htons(ETH_P_IP))) {
+ NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
return -EINVAL;
+ }
bareudp->port = conf->port;
bareudp->ethertype = conf->ethertype;
@@ -656,10 +699,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int bareudp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bareudp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct bareudp_conf conf;
int err;
@@ -667,7 +713,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf);
+ err = bareudp_configure(link_net, dev, &conf, extack);
if (err)
return err;
@@ -724,40 +770,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
.fill_info = bareudp_fill_info,
};
-struct net_device *bareudp_dev_create(struct net *net, const char *name,
- u8 name_assign_type,
- struct bareudp_conf *conf)
-{
- struct nlattr *tb[IFLA_MAX + 1];
- struct net_device *dev;
- int err;
-
- memset(tb, 0, sizeof(tb));
- dev = rtnl_create_link(net, name, name_assign_type,
- &bareudp_link_ops, tb, NULL);
- if (IS_ERR(dev))
- return dev;
-
- err = bareudp_configure(net, dev, conf);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
- if (err)
- goto err;
-
- err = rtnl_configure_link(dev, NULL);
- if (err < 0)
- goto err;
-
- return dev;
-err:
- bareudp_dellink(dev, NULL);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(bareudp_dev_create);
-
static __net_init int bareudp_init_net(struct net *net)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
@@ -766,32 +778,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
-static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit bareudp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
- unregister_netdevice_queue(bareudp->dev, head);
-}
-
-static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
-{
- struct net *net;
- LIST_HEAD(list);
-
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch = bareudp_exit_batch_net,
+ .exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
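
The statistics churn through this file follows one conversion: instead of a driver-allocated tstats block managed in ndo_init/ndo_uninit, the device opts into core-managed per-CPU dstats, and the bare dev->stats counters move to the dev_dstats_*() and DEV_STATS_INC() helpers, which are safe against concurrent updates. The opt-in in miniature, with demo names as placeholders:

#include <linux/netdevice.h>

static void demo_setup(struct net_device *dev)
{
	/* the core now allocates and frees the per-CPU stats block */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}

static void demo_rx(struct net_device *dev, struct sk_buff *skb, bool ok)
{
	if (ok)
		dev_dstats_rx_add(dev, skb->len);	/* counts packet and bytes */
	else
		dev_dstats_rx_dropped(dev);
}
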
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..1a8de2bf8655 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,20 +75,20 @@ enum ad_link_speed_type {
AD_LINK_SPEED_100000MBPS,
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
+ AD_LINK_SPEED_800000MBPS,
+ AD_LINK_SPEED_1600000MBPS,
};
/* compare MAC addresses */
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
- 0, 0, 0, 0, 0, 0
-};
-static u16 ad_ticks_per_sec;
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -96,13 +96,16 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
+static void ad_periodic_machine(struct port *port);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
-static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+static void ad_enable_collecting(struct port *port);
+static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
@@ -169,8 +172,37 @@ static inline int __agg_has_partner(struct aggregator *agg)
}
/**
+ * __disable_distributing_port - disable the port's slave for distributing.
+ * Port will still be able to collect.
+ * @port: the port we're looking at
+ *
+ * This will disable only distributing on the port's slave.
+ */
+static void __disable_distributing_port(struct port *port)
+{
+ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
+ * __enable_collecting_port - enable the port's slave for collecting,
+ * if it's up
+ * @port: the port we're looking at
+ *
+ * This will enable only collecting on the port's slave.
+ */
+static void __enable_collecting_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
+ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
+ *
+ * This will disable both collecting and distributing on the port's slave.
*/
static inline void __disable_port(struct port *port)
{
@@ -180,6 +212,8 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
+ *
+ * This will enable both collecting and distributing on the port's slave.
*/
static inline void __enable_port(struct port *port)
{
@@ -190,10 +224,27 @@ static inline void __enable_port(struct port *port)
}
/**
- * __port_is_enabled - check if the port's slave is in active state
+ * __port_move_to_attached_state - check if port should transition back to attached
+ * state.
* @port: the port we're looking at
*/
-static inline int __port_is_enabled(struct port *port)
+static bool __port_move_to_attached_state(struct port *port)
+{
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
+ port->sm_mux_state = AD_MUX_ATTACHED;
+
+ return port->sm_mux_state == AD_MUX_ATTACHED;
+}
+
+/**
+ * __port_is_collecting_distributing - check if the port's slave is in the
+ * combined collecting/distributing state
+ * @port: the port we're looking at
+ */
+static int __port_is_collecting_distributing(struct port *port)
{
return bond_is_active_slave(port->slave);
}
@@ -225,7 +276,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -249,6 +300,8 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_100000MBPS
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
+ * %AD_LINK_SPEED_800000MBPS
+ * %AD_LINK_SPEED_1600000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -324,6 +377,14 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_400000MBPS;
break;
+ case SPEED_800000:
+ speed = AD_LINK_SPEED_800000MBPS;
+ break;
+
+ case SPEED_1600000:
+ speed = AD_LINK_SPEED_1600000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -381,6 +442,7 @@ static void __ad_actor_update_port(struct port *port)
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+ port->actor_port_priority = SLAVE_AD_INFO(port->slave)->port_priority;
}
/* Conversions */
@@ -691,6 +753,18 @@ static int __agg_active_ports(struct aggregator *agg)
return active;
}
+static unsigned int __agg_ports_priority(const struct aggregator *agg)
+{
+ struct port *port = agg->lag_ports;
+ unsigned int prio = 0;
+
+ for (; port; port = port->next_port_in_aggregator)
+ if (port->is_enabled)
+ prio += port->actor_port_priority;
+
+ return prio;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -751,6 +825,12 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_400000MBPS:
bandwidth = nports * 400000;
break;
+ case AD_LINK_SPEED_800000MBPS:
+ bandwidth = nports * 800000;
+ break;
+ case AD_LINK_SPEED_1600000MBPS:
+ bandwidth = nports * 1600000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
@@ -924,6 +1004,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
return 0;
}
+static void ad_cond_set_peer_notif(struct port *port)
+{
+ struct bonding *bond = port->slave->bond;
+
+ if (bond->params.broadcast_neighbor && rtnl_trylock()) {
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+ rtnl_unlock();
+ }
+}
+
/**
* ad_mux_machine - handle a port's mux state machine
* @port: the port we're looking at
@@ -931,6 +1022,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
+ struct bonding *bond = __get_bond_by_port(port);
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
@@ -988,9 +1080,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- if (port->aggregator->is_active)
- port->sm_mux_state =
- AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active) {
+ int state = AD_MUX_COLLECTING_DISTRIBUTING;
+
+ if (!bond->params.coupled_control)
+ state = AD_MUX_COLLECTING;
+ port->sm_mux_state = state;
+ }
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -1008,11 +1104,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!__port_move_to_attached_state(port)) {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
+ if (port->aggregator->is_active &&
+ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+ }
+ break;
+ case AD_MUX_COLLECTING:
+ if (!__port_move_to_attached_state(port)) {
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
+ port->sm_mux_state = AD_MUX_DISTRIBUTING;
+ } else {
+ /* If port state hasn't changed, make sure that a collecting
+ * port is enabled for an active aggregator.
+ */
+ struct slave *slave = port->slave;
+
+ if (port->aggregator->is_active &&
+ bond_is_slave_rx_disabled(slave)) {
+ ad_enable_collecting(port);
+ *update_slave_arr = true;
+ }
+ }
+ }
+ break;
+ case AD_MUX_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
- port->sm_mux_state = AD_MUX_ATTACHED;
+ port->sm_mux_state = AD_MUX_COLLECTING;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
@@ -1020,9 +1150,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
if (port->aggregator &&
port->aggregator->is_active &&
- !__port_is_enabled(port)) {
-
+ !__port_is_collecting_distributing(port)) {
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1071,6 +1201,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
update_slave_arr);
port->ntt = true;
break;
+ case AD_MUX_COLLECTING:
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting(port);
+ ad_disable_distributing(port, update_slave_arr);
+ port->ntt = true;
+ break;
+ case AD_MUX_DISTRIBUTING:
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting_distributing(port,
+ update_slave_arr);
+ break;
default:
break;
}
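
The new AD_MUX_COLLECTING and AD_MUX_DISTRIBUTING cases implement the independent-control variant of IEEE 802.1AX: a port first enables only collecting (receive), and steps up to distributing (transmit) only once the partner advertises LACP_STATE_COLLECTING; losing that bit drops it back to collecting-only rather than all the way to attached. The transition rule compressed into a sketch, where demo_next_mux_state() is invented and the selection/synchronization preconditions are elided; the constants are the driver's own:

static int demo_next_mux_state(u8 partner_state, bool coupled_control)
{
	if (coupled_control)			/* legacy combined behaviour */
		return AD_MUX_COLLECTING_DISTRIBUTING;

	if (partner_state & LACP_STATE_COLLECTING)
		return AD_MUX_DISTRIBUTING;	/* partner collects: safe to transmit */

	return AD_MUX_COLLECTING;		/* rx-only until the partner is ready */
}
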
@@ -1185,10 +1329,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
+ * machine state diagram, the statue should be
+ * Partner_Oper_Port_State.Synchronization = FALSE;
+ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
+ * start current_while_timer(Short Timeout);
+ * Actor_Oper_Port_State.Expired = TRUE;
+ */
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
@@ -1267,7 +1417,7 @@ static void ad_tx_machine(struct port *port)
/* check if tx timer expired, to verify that we do not send more than
* 3 packets per second
*/
- if (port->sm_tx_timer_counter && !(--port->sm_tx_timer_counter)) {
+ if (!port->sm_tx_timer_counter || !(--port->sm_tx_timer_counter)) {
/* check if there is something to send */
if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
__update_lacpdu_from_port(port);
@@ -1282,23 +1432,23 @@ static void ad_tx_machine(struct port *port)
* again until demanded
*/
port->ntt = false;
+
+ /* restart tx timer(to verify that we will not
+ * exceed AD_MAX_TX_IN_SECOND
+ */
+ port->sm_tx_timer_counter = ad_ticks_per_sec / AD_MAX_TX_IN_SECOND;
}
}
- /* restart tx timer(to verify that we will not exceed
- * AD_MAX_TX_IN_SECOND
- */
- port->sm_tx_timer_counter = ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
}
}
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
- * @bond_params: bond parameters we will use
*
 * Turn ntt flag on periodically to perform periodic transmission of LACPDUs.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
+static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
@@ -1307,8 +1457,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params->lacp_active) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -1473,7 +1622,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ ((__agg_has_partner(aggregator) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
@@ -1538,6 +1687,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
slave_err(bond->dev, port->slave->dev,
"Port %d did not find a suitable aggregator\n",
port->actor_port_number);
+ return;
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
@@ -1579,6 +1729,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
+ * BOND_AD_PRIO: Select by total priority of ports. If priority
+ * is equal, select by count.
+ *
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
@@ -1600,6 +1753,14 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
+ case BOND_AD_PRIO:
+ if (__agg_ports_priority(curr) > __agg_ports_priority(best))
+ return curr;
+
+ if (__agg_ports_priority(curr) < __agg_ports_priority(best))
+ return best;
+
+ fallthrough;
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
@@ -1665,6 +1826,10 @@ static int agg_device_up(const struct aggregator *agg)
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
+ * BOND_AD_PRIO: select the aggregator with highest total priority of ports
+ * (slaves), and reselect whenever a link state change takes place or the
+ * set of slaves in the bond changes.
+ *
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
@@ -1779,6 +1944,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1830,16 +1996,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
- * @lacp_fast: boolean. whether fast periodic should be used
+ * @bond_params: bond parameters we will use
*/
-static void ad_initialize_port(struct port *port, int lacp_fast)
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
- .port_state = 1,
+ .port_state = 0,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
@@ -1857,12 +2023,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
+ if (bond_params->lacp_active) {
+ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ }
- if (lacp_fast)
+ if (bond_params->lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
@@ -1894,6 +2062,43 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
}
/**
+ * ad_enable_collecting - enable a port's receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ struct slave *slave = port->slave;
+
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Enabling collecting on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __enable_collecting_port(port);
+ }
+}
+
+/**
+ * ad_disable_distributing - disable a port's transmit
+ * @port: the port we're looking at
+ * @update_slave_arr: Does slave array need update?
+ */
+static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
+{
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling distributing on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __disable_distributing_port(port);
+ /* Slave array needs an update */
+ *update_slave_arr = true;
+ }
+}
+
+/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
@@ -1911,6 +2116,8 @@ static void ad_enable_collecting_distributing(struct port *port,
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+ /* Should notify peers if possible */
+ ad_cond_set_peer_notif(port);
}
}
@@ -1922,9 +2129,7 @@ static void ad_enable_collecting_distributing(struct port *port,
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
- if (port->aggregator &&
- !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
- &(null_mac_addr))) {
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
@@ -1994,42 +2199,30 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
@@ -2051,7 +2244,10 @@ void bond_3ad_bind_slave(struct slave *slave)
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast);
+ ad_initialize_port(port, &bond->params);
+
+ /* Port priority is initialized. Update it to slave's ad info */
+ SLAVE_AD_INFO(slave)->port_priority = port->actor_port_priority;
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
@@ -2227,7 +2423,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
@@ -2278,6 +2475,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
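+
bond_agg_timer_advance() is a lock-free countdown: concurrent callers race to decrement, and the compare-and-swap retry loop guarantees each tick is consumed exactly once, so exactly one caller observes the transition to zero. A userspace analogue using C11 atomics (atomic_compare_exchange_weak standing in for the kernel's atomic_cmpxchg) might look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int agg_select_timer;

static bool timer_advance(void)
{
	int val, nval;

	do {
		val = atomic_load(&agg_select_timer);
		if (!val)
			return false; /* already expired earlier */
		nval = val - 1;
	} while (!atomic_compare_exchange_weak(&agg_select_timer,
					       &val, nval));
	return nval == 0; /* true for exactly one caller */
}

int main(void)
{
	atomic_store(&agg_select_timer, 3);
	for (int i = 0; i < 5; i++) /* expired=1 only on the 3rd tick */
		printf("tick %d: expired=%d\n", i, timer_advance());
	return 0;
}
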
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2531,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
@@ -2342,7 +2559,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, &bond->params);
+ ad_periodic_machine(port);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
@@ -2712,6 +2929,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_3ad_update_lacp_active - change the lacp active
+ * @bond: bonding struct
+ *
+ * Update actor_oper_port_state when lacp_active is modified.
+ */
+void bond_3ad_update_lacp_active(struct bonding *bond)
+{
+ struct port *port = NULL;
+ struct list_head *iter;
+ struct slave *slave;
+ int lacp_active;
+
+ lacp_active = bond->params.lacp_active;
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (lacp_active)
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ else
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2ec8e015c7b3..2d37b07c8215 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
@@ -652,18 +653,28 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
- /* Don't modify or load balance ARPs that do not originate locally
- * (e.g.,arrive via a bridge).
+ /* Don't modify or load balance ARPs that do not originate
+ * from the bond itself or a VLAN directly above the bond.
*/
- if (!bond_slave_has_mac_rx(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_any_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
@@ -974,7 +985,8 @@ static int alb_upper_dev_walk(struct net_device *upper,
if (netif_is_macvlan(upper) && !strict_match) {
tags = bond_verify_device_path(bond->dev, upper, 0);
if (IS_ERR_OR_NULL(tags))
- BUG();
+ return -ENOMEM;
+
alb_send_lp_vid(slave, upper->dev_addr,
tags[0].vlan_proto, tags[0].vlan_id);
kfree(tags);
@@ -1023,7 +1035,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
+ if (dev_set_mac_address(dev, &ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
@@ -1261,14 +1273,34 @@ unwind:
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
- dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(rollback_slave->dev, &ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
return res;
}
+/* determine if the packet is an IPv6 Neighbor Advertisement or Solicitation */
+static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
+{
+ struct ipv6hdr *ip6hdr;
+ struct icmp6hdr *hdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
+ return true;
+
+ ip6hdr = ipv6_hdr(skb);
+ if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
+ return true;
+
+ hdr = icmp6_hdr(skb);
+ return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
+ hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
+}
+
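+
alb_determine_nd() classifies a frame by checking the IPv6 next header and the ICMPv6 type, and deliberately reports "is ND" whenever the headers cannot be pulled, so questionable packets are not tx-balanced. A self-contained sketch of the same checks using the libc netinet headers follows; the sample packet is fabricated, and like the driver's linear check it ignores IPv6 extension headers.

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_nd(const unsigned char *pkt, size_t len)
{
	const struct ip6_hdr *ip6 = (const struct ip6_hdr *)pkt;
	const struct icmp6_hdr *icmp6;

	if (len < sizeof(*ip6))
		return true; /* cannot prove otherwise; play safe */
	if (ip6->ip6_nxt != IPPROTO_ICMPV6)
		return false;
	if (len < sizeof(*ip6) + sizeof(*icmp6))
		return true;
	icmp6 = (const struct icmp6_hdr *)(pkt + sizeof(*ip6));
	return icmp6->icmp6_type == ND_NEIGHBOR_SOLICIT ||
	       icmp6->icmp6_type == ND_NEIGHBOR_ADVERT;
}

int main(void)
{
	struct {
		struct ip6_hdr ip6;
		struct icmp6_hdr icmp6;
	} pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.ip6.ip6_nxt = IPPROTO_ICMPV6;
	pkt.icmp6.icmp6_type = ND_NEIGHBOR_SOLICIT;
	printf("is_nd: %d\n", is_nd((const unsigned char *)&pkt,
				    sizeof(pkt))); /* is_nd: 1 */
	return 0;
}
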
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
@@ -1280,12 +1312,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
@@ -1348,8 +1380,11 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
- case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
+ if (alb_determine_nd(skb, bond))
+ break;
+ fallthrough;
+ case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
@@ -1432,10 +1467,12 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
break;
}
- if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
+
+ /* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
@@ -1501,14 +1538,14 @@ void bond_alb_monitor(struct work_struct *work)
struct slave *slave;
if (!bond_has_slaves(bond)) {
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}
rcu_read_lock();
- bond_info->tx_rebalance_counter++;
+ atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;
/* send learning packets */
@@ -1530,7 +1567,7 @@ void bond_alb_monitor(struct work_struct *work)
}
/* rebalance tx traffic */
- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1577,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
}
if (bond_info->rlb_enabled) {
@@ -1610,7 +1647,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
tlb_init_slave(slave);
/* order a rebalance ASAP */
- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond->alb_info.tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1685,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond_info->tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
@@ -1723,8 +1762,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
- NULL);
+ dev_set_mac_address(new_slave->dev, &ss, NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..8adbec7c5084 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -49,9 +49,6 @@ DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash);
void bond_debug_register(struct bonding *bond)
{
- if (!bonding_debug_root)
- return;
-
bond->debug_dir =
debugfs_create_dir(bond->dev->name, bonding_debug_root);
@@ -61,34 +58,23 @@ void bond_debug_register(struct bonding *bond)
void bond_debug_unregister(struct bonding *bond)
{
- if (!bonding_debug_root)
- return;
-
debugfs_remove_recursive(bond->debug_dir);
}
void bond_debug_reregister(struct bonding *bond)
{
- struct dentry *d;
-
- if (!bonding_debug_root)
- return;
-
- d = debugfs_rename(bonding_debug_root, bond->debug_dir,
- bonding_debug_root, bond->dev->name);
- if (d) {
- bond->debug_dir = d;
- } else {
+ int err = debugfs_change_name(bond->debug_dir, "%s", bond->dev->name);
+
+ if (err) {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
bond_debug_unregister(bond);
}
}
-void bond_create_debugfs(void)
+void __init bond_create_debugfs(void)
{
bonding_debug_root = debugfs_create_dir("bonding", NULL);
- if (!bonding_debug_root)
+ if (IS_ERR(bonding_debug_root))
pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
@@ -113,7 +99,7 @@ void bond_debug_reregister(struct bonding *bond)
{
}
-void bond_create_debugfs(void)
+void __init bond_create_debugfs(void)
{
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ff8da720a33a..3d56339a8a10 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* originally based on the dummy device.
*
* Copyright 1999, Thomas Davis, tadavis@lbl.gov.
- * Licensed under the GPL. Based on dummy.c, and eql.c devices.
+ * Based on dummy.c, and eql.c devices.
*
* bonding.c: an Ethernet Bonding driver
*
@@ -35,6 +36,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
@@ -71,6 +73,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
+#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
@@ -86,6 +89,9 @@
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
+#include <net/ip6_route.h>
+#include <net/netdev_lock.h>
+#include <net/xdp.h>
#include "bonding_priv.h"
@@ -136,8 +142,7 @@ module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
- "0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "option obsolete, use_carrier cannot be disabled");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
@@ -206,6 +211,8 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(0);
unsigned int bond_net_id __read_mostly;
+DEFINE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -304,7 +311,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
-bool bond_sk_check(struct bonding *bond)
+static bool bond_sk_check(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_8023AD:
@@ -317,9 +324,9 @@ bool bond_sk_check(struct bonding *bond)
}
}
-static bool bond_xdp_check(struct bonding *bond)
+bool bond_xdp_check(struct bonding *bond, int mode)
{
- switch (BOND_MODE(bond)) {
+ switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
@@ -414,12 +421,49 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
#ifdef CONFIG_XFRM_OFFLOAD
/**
- * bond_ipsec_add_sa - program device with a security association
+ * bond_ipsec_dev - Get active device for IPsec offload
* @xs: pointer to transformer state struct
+ *
+ * Context: caller must hold rcu_read_lock.
+ *
+ * Return: the device for ipsec offload, or NULL if it does not exist.
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs)
+static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bonding *bond;
+ struct slave *slave;
+
+ bond = netdev_priv(bond_dev);
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+ return NULL;
+
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ return NULL;
+
+ if (!xs->xso.real_dev)
+ return NULL;
+
+ if (xs->xso.real_dev != slave->dev)
+ pr_warn_ratelimited("%s: (slave %s): does not match IPsec offload real dev %s\n",
+ bond_dev->name, slave->dev->name, xs->xso.real_dev->name);
+
+ return slave->dev;
+}
+
+/**
+ * bond_ipsec_add_sa - program device with a security association
+ * @bond_dev: pointer to the bond net device
+ * @xs: pointer to transformer state struct
+ * @extack: extack pointer to fill with the failure reason
+ **/
+static int bond_ipsec_add_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -431,112 +475,185 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
- return -ENODEV;
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+ if (!real_dev) {
+ err = -ENODEV;
+ goto out;
}
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
- rcu_read_unlock();
- return -EINVAL;
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
+ err = -EINVAL;
+ goto out;
}
- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec) {
- rcu_read_unlock();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
- xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack);
if (!err) {
+ xs->xso.real_dev = real_dev;
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
- rcu_read_unlock();
+out:
+ netdev_put(real_dev, &tracker);
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave)
- goto out;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
if (!list_empty(&bond->ipsec_list))
- slave_warn(bond_dev, slave->dev,
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
- spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
- ipsec->xs->xso.real_dev = NULL;
+ /* Skip states already bound to real_dev (e.g. added before ipsec_lock was acquired) */
+ if (ipsec->xs->xso.real_dev == real_dev)
+ continue;
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev,
+ ipsec->xs, NULL)) {
+ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ /* xs might have been killed by the user during the migration
+ * to the new dev, but bond_ipsec_del_sa() should have done
+ * nothing, as xso.real_dev is NULL.
+ * Delete it from the device we just added it to. The pending
+ * bond_ipsec_free_sa() call will do the rest of the cleanup.
+ */
+ if (ipsec->xs->km.state == XFRM_STATE_DEAD &&
+ real_dev->xfrmdev_ops->xdo_dev_state_delete)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ ipsec->xs->xso.real_dev = real_dev;
+ spin_unlock_bh(&ipsec->xs->lock);
}
- spin_unlock_bh(&bond->ipsec_lock);
out:
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
/**
* bond_ipsec_del_sa - clear out this specific SA
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
**/
-static void bond_ipsec_del_sa(struct xfrm_state *xs)
+static void bond_ipsec_del_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+
+ if (!bond_dev || !xs->xso.real_dev)
+ return;
+
+ real_dev = xs->xso.real_dev;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+ return;
+ }
+
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs);
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
- struct bonding *bond;
struct slave *slave;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
+
+ mutex_lock(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ continue;
+ }
+
+ spin_lock_bh(&ipsec->xs->lock);
+ ipsec->xs->xso.real_dev = NULL;
+ /* Don't double delete states killed by the user. */
+ if (ipsec->xs->km.state != XFRM_STATE_DEAD)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ spin_unlock_bh(&ipsec->xs->lock);
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev,
+ ipsec->xs);
+ }
+ mutex_unlock(&bond->ipsec_lock);
+}
+
+static void bond_ipsec_free_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+ struct bond_ipsec *ipsec;
+ struct bonding *bond;
+
if (!bond_dev)
return;
- rcu_read_lock();
bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
-
- if (!slave)
- goto out;
+ mutex_lock(&bond->ipsec_lock);
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != slave->dev);
-
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- goto out;
- }
+ real_dev = xs->xso.real_dev;
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+ xs->xso.real_dev = NULL;
+ if (real_dev->xfrmdev_ops &&
+ real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs);
out:
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
@@ -544,88 +661,84 @@ out:
break;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
-static void bond_ipsec_del_sa_all(struct bonding *bond)
+/**
+ * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct net_device *bond_dev = bond->dev;
- struct bond_ipsec *ipsec;
- struct slave *slave;
+ struct net_device *real_dev;
rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev || netif_is_bond_master(real_dev)) {
rcu_read_unlock();
- return;
+ return false;
}
- spin_lock_bh(&bond->ipsec_lock);
- list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- if (!ipsec->xs->xso.real_dev)
- continue;
-
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev,
- "%s: no slave xdo_dev_state_delete\n",
- __func__);
- } else {
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
- }
- ipsec->xs->xso.real_dev = NULL;
- }
- spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
+ return true;
}
/**
- * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
+ * bond_advance_esn_state - ESN support for IPsec HW offload
* @xs: pointer to transformer state struct
**/
-static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+static void bond_advance_esn_state(struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- struct slave *curr_active;
- struct bonding *bond;
- int err;
- bond = netdev_priv(bond_dev);
rcu_read_lock();
- curr_active = rcu_dereference(bond->curr_active_slave);
- real_dev = curr_active->dev;
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
+ goto out;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
- err = false;
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name);
goto out;
}
- if (!xs->xso.real_dev) {
- err = false;
+ real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs);
+out:
+ rcu_read_unlock();
+}
+
+/**
+ * bond_xfrm_update_stats - update xfrm state statistics
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_xfrm_update_stats(struct xfrm_state *xs)
+{
+ struct net_device *real_dev;
+
+ rcu_read_lock();
+ real_dev = bond_ipsec_dev(xs);
+ if (!real_dev)
goto out;
- }
if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev)) {
- err = false;
+ !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) {
+ pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name);
goto out;
}
- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs);
out:
rcu_read_unlock();
- return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
+ .xdo_dev_state_free = bond_ipsec_free_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = bond_advance_esn_state,
+ .xdo_dev_state_update_stats = bond_xfrm_update_stats,
};
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -716,73 +829,6 @@ const char *bond_slave_link_status(s8 link)
}
}
-/* if <dev> supports MII link status reporting, check its link status.
- *
- * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depending upon the setting of the use_carrier parameter.
- *
- * Return either BMSR_LSTATUS, meaning that the link is up (or we
- * can't tell and just pretend it is), or 0, meaning that the link is
- * down.
- *
- * If reporting is non-zero, instead of faking link up, return -1 if
- * both ETHTOOL and MII ioctls fail (meaning the device does not
- * support them). If use_carrier is set, return whatever it says.
- * It'd be nice if there was a good way to tell if a driver supports
- * netif_carrier, but there really isn't.
- */
-static int bond_check_dev_link(struct bonding *bond,
- struct net_device *slave_dev, int reporting)
-{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- int (*ioctl)(struct net_device *, struct ifreq *, int);
- struct ifreq ifr;
- struct mii_ioctl_data *mii;
-
- if (!reporting && !netif_running(slave_dev))
- return 0;
-
- if (bond->params.use_carrier)
- return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
-
- /* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link)
- return slave_dev->ethtool_ops->get_link(slave_dev) ?
- BMSR_LSTATUS : 0;
-
- /* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_eth_ioctl;
- if (ioctl) {
- /* TODO: set pointer to correct ioctl on a per team member
- * bases to make this more efficient. that is, once
- * we determine the correct ioctl, we will always
- * call it and not the others for that team
- * member.
- */
-
- /* We cannot assume that SIOCGMIIPHY will also read a
- * register; not all network drivers (e.g., e100)
- * support that.
- */
-
- /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
- mii = if_mii(&ifr);
- if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
- mii->reg_num = MII_BMSR;
- if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
- return mii->val_out & BMSR_LSTATUS;
- }
- }
-
- /* If reporting, report that either there's no ndo_eth_ioctl,
- * or both SIOCGMIIREG and get_link failed (meaning that we
- * cannot report link status). If not reporting, pretend
- * we're ok.
- */
- return reporting ? -1 : BMSR_LSTATUS;
-}
-
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
@@ -862,12 +908,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -887,7 +929,10 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
+
+ bond_slave_ns_maddrs_add(bond, old_active);
}
if (new_active) {
@@ -898,10 +943,14 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
+
+ bond_slave_ns_maddrs_del(bond, new_active);
}
}
@@ -919,7 +968,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->addr_len);
- err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+ err = netif_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@ -993,8 +1042,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(new_active->dev, &ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
@@ -1008,8 +1056,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(old_active->dev, &ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
@@ -1023,12 +1070,38 @@ out:
}
+/**
+ * bond_choose_primary_or_current - select the primary or high priority slave
+ * @bond: our bonding struct
+ *
+ * - Check if there is a primary link. If the primary link was set and is up,
+ * go on and do link reselection.
+ *
+ * - If the primary link is not set or is down, find the highest priority
+ *   link. If the highest priority link is not the current slave, set it as
+ *   primary link and do link reselection.
+ */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
+ struct slave *slave, *hprio = NULL;
+ struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP) {
+ hprio = hprio ?: slave;
+ if (slave->prio > hprio->prio)
+ hprio = slave;
+ }
+ }
+
+ if (hprio && hprio != curr) {
+ prim = hprio;
+ goto link_reselect;
+ }
+
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
@@ -1039,6 +1112,7 @@ static struct slave *bond_choose_primary_or_current(struct bonding *bond)
return prim;
}
+link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
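
The hprio scan added above keeps the first link-up slave it sees, then replaces it with any later slave whose prio is strictly higher. The same selection extracted into a standalone sketch (struct and data invented):

#include <stdio.h>

struct slave {
	const char *name;
	int link_up;
	int prio;
};

static const struct slave *highest_prio(const struct slave *s, int n)
{
	const struct slave *hprio = NULL;

	for (int i = 0; i < n; i++) {
		if (!s[i].link_up)
			continue;
		if (!hprio || s[i].prio > hprio->prio)
			hprio = &s[i]; /* mirrors hprio = hprio ?: slave */
	}
	return hprio;
}

int main(void)
{
	struct slave slaves[] = {
		{ "eth0", 1, 0 }, { "eth1", 0, 50 }, { "eth2", 1, 10 },
	};
	const struct slave *s = highest_prio(slaves, 3);

	printf("candidate: %s\n", s ? s->name : "none"); /* eth2 */
	return 0;
}
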
@@ -1088,24 +1162,36 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
return bestslave;
}
+/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave;
-
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- rcu_read_unlock();
-
- netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
+ struct bond_up_slave *usable;
+ struct slave *slave = NULL;
- if (!slave || !bond->send_peer_notif ||
+ if (!bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
- !netif_carrier_ok(bond->dev) ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ !netif_carrier_ok(bond->dev))
return false;
+ /* The send_peer_notif is set by active-backup or 8023ad
+ * mode, and cleared in bond_close() when changing mode.
+ * It is safe to only check bond mode here.
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ usable = rcu_dereference_rtnl(bond->usable_slaves);
+ if (!usable || !READ_ONCE(usable->count))
+ return false;
+ } else {
+ slave = rcu_dereference_rtnl(bond->curr_active_slave);
+ if (!slave || test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &slave->dev->state))
+ return false;
+ }
+
+ netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
+ slave ? slave->dev->name : "all");
+
return true;
}
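
The modulo test above paces peer notifications: send_peer_notif counts notifications still owed and is decremented on each monitor run, so a notification fires only when the remaining count is a multiple of peer_notif_delay. A small sketch with invented values:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int send_peer_notif = 6;  /* notifications still owed */
	int peer_notif_delay = 2; /* monitor runs between sends */

	while (send_peer_notif) {
		int notify =
			send_peer_notif % MAX(1, peer_notif_delay) == 0;

		printf("owed %d: notify=%d\n", send_peer_notif, notify);
		send_peer_notif--; /* one monitor run elapsed */
	}
	return 0;
}
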
@@ -1330,7 +1416,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
slave_disable_netpoll(slave);
}
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int bond_netpoll_setup(struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct list_head *iter;
@@ -1369,17 +1455,8 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- features |= BOND_TLS_FEATURES;
- else
- features &= ~BOND_TLS_FEATURES;
-#endif
-
mask = features;
-
- features &= ~NETIF_F_ONE_FOR_ALL;
- features |= NETIF_F_ALL_FOR_ALL;
+ features = netdev_base_features(features);
bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features,
@@ -1391,89 +1468,13 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
- NETIF_F_HIGHDMA | NETIF_F_LRO)
-
-#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
-
-#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_GSO_SOFTWARE)
-
-
-static void bond_compute_features(struct bonding *bond)
-{
- unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
- IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t vlan_features = BOND_VLAN_FEATURES;
- netdev_features_t enc_features = BOND_ENC_FEATURES;
-#ifdef CONFIG_XFRM_OFFLOAD
- netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
-#endif /* CONFIG_XFRM_OFFLOAD */
- netdev_features_t mpls_features = BOND_MPLS_FEATURES;
- struct net_device *bond_dev = bond->dev;
- struct list_head *iter;
- struct slave *slave;
- unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int gso_max_size = GSO_MAX_SIZE;
- u16 gso_max_segs = GSO_MAX_SEGS;
-
- if (!bond_has_slaves(bond))
- goto done;
- vlan_features &= NETIF_F_ALL_FOR_ALL;
- mpls_features &= NETIF_F_ALL_FOR_ALL;
-
- bond_for_each_slave(bond, slave, iter) {
- vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features, BOND_VLAN_FEATURES);
-
- enc_features = netdev_increment_features(enc_features,
- slave->dev->hw_enc_features,
- BOND_ENC_FEATURES);
-
-#ifdef CONFIG_XFRM_OFFLOAD
- xfrm_features = netdev_increment_features(xfrm_features,
- slave->dev->hw_enc_features,
- BOND_XFRM_FEATURES);
-#endif /* CONFIG_XFRM_OFFLOAD */
-
- mpls_features = netdev_increment_features(mpls_features,
- slave->dev->mpls_features,
- BOND_MPLS_FEATURES);
-
- dst_release_flag &= slave->dev->priv_flags;
- if (slave->dev->hard_header_len > max_hard_header_len)
- max_hard_header_len = slave->dev->hard_header_len;
-
- gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
- gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
- }
- bond_dev->hard_header_len = max_hard_header_len;
-
-done:
- bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
-#ifdef CONFIG_XFRM_OFFLOAD
- bond_dev->hw_enc_features |= xfrm_features;
-#endif /* CONFIG_XFRM_OFFLOAD */
- bond_dev->mpls_features = mpls_features;
- bond_dev->gso_max_segs = gso_max_segs;
- netif_set_gso_max_size(bond_dev, gso_max_size);
-
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
- dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
- bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
-
- netdev_change_features(bond_dev);
-}
-
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
+ bool was_up = !!(bond_dev->flags & IFF_UP);
+
+ dev_close(bond_dev);
+
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
@@ -1483,6 +1484,13 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
memcpy(bond_dev->broadcast, slave_dev->broadcast,
slave_dev->addr_len);
+
+ if (slave_dev->flags & IFF_POINTOPOINT) {
+ bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
+ }
+ if (was_up)
+ dev_open(bond_dev, NULL);
}
/* On bonding slaves other than the currently active slave, suppress
@@ -1610,13 +1618,19 @@ static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
{
struct netdev_lag_upper_info lag_upper_info;
enum netdev_lag_tx_type type;
+ int err;
type = bond_lag_tx_type(bond);
lag_upper_info.tx_type = type;
lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
- return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
- &lag_upper_info, extack);
+ err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
+ &lag_upper_info, extack);
+ if (err)
+ return err;
+
+ slave->dev->flags |= IFF_SLAVE;
+ return 0;
}
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
@@ -1745,6 +1759,42 @@ void bond_lower_state_changed(struct slave *slave)
slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
} while (0)
+/* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
+ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
+ * if they were set.
+ */
+static void bond_ether_setup(struct net_device *bond_dev)
+{
+ unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
+
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER | flags;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+}
+
+void bond_xdp_set_features(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ xdp_features_t val = NETDEV_XDP_ACT_MASK;
+ struct list_head *iter;
+ struct slave *slave;
+
+ ASSERT_RTNL();
+
+ if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) {
+ xdp_clear_features_flag(bond_dev);
+ return;
+ }
+
+ bond_for_each_slave(bond, slave, iter)
+ val &= slave->dev->xdp_features;
+
+ val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
+ xdp_set_features_flag(bond_dev, val);
+}
+
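+
bond_xdp_set_features() advertises only the XDP features every slave supports: it starts from the full action mask and ANDs each slave's set into it (then strips zero-copy, which bonding cannot forward). A toy sketch of the intersection with made-up bit names:

#include <stdio.h>

#define ACT_BASIC    (1u << 0)
#define ACT_REDIRECT (1u << 1)
#define ACT_XMIT     (1u << 2)
#define ACT_MASK     (ACT_BASIC | ACT_REDIRECT | ACT_XMIT)

int main(void)
{
	unsigned int slaves[] = {
		ACT_BASIC | ACT_REDIRECT | ACT_XMIT,
		ACT_BASIC | ACT_REDIRECT, /* no ndo_xdp_xmit */
	};
	unsigned int val = ACT_MASK;

	for (size_t i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++)
		val &= slaves[i];
	printf("bond xdp features: 0x%x\n", val); /* 0x3 */
	return 0;
}
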
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
@@ -1753,7 +1803,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
- int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
@@ -1763,12 +1812,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
return -EPERM;
}
- if (!bond->params.use_carrier &&
- slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_eth_ioctl == NULL) {
- slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
- }
-
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
@@ -1836,10 +1879,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else {
- ether_setup(bond_dev);
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- }
+ else
+ bond_ether_setup(bond_dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
@@ -1919,17 +1960,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
- ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
- extack);
- if (res) {
- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
- goto err_restore_mtu;
- }
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+ */
+ eth_random_addr(ss.__data);
+ } else {
+ goto skip_mac_set;
+ }
+
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &ss, extack);
+ if (res) {
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+ goto err_restore_mtu;
}
- /* set slave flag before open to prevent IPv6 addrconf */
- slave_dev->flags |= IFF_SLAVE;
+skip_mac_set:
+
+ /* set no_addrconf flag before open to prevent IPv6 addrconf */
+ slave_dev->priv_flags |= IFF_NO_ADDRCONF;
/* open the slave since the application closed it */
res = dev_open(slave_dev, extack);
@@ -1971,29 +2024,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
- if (bond->params.miimon && !bond->params.use_carrier) {
- link_reporting = bond_check_dev_link(bond, slave_dev, 1);
-
- if ((link_reporting == -1) && !bond->params.arp_interval) {
- /* miimon is set but a bonded network driver
- * does not support ETHTOOL/MII and
- * arp_interval is not set. Note: if
- * use_carrier is enabled, we will never go
- * here (because netif_carrier is always
- * supported); thus, we don't need to change
- * the messages for netif_carrier.
- */
- slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
- } else if (link_reporting == -1) {
- /* unable get link status using mii/ethtool */
- slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
- }
- }
+ new_slave->last_tx = new_slave->last_rx;
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+ if (netif_running(slave_dev) && netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2049,7 +2085,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2134,33 +2170,37 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
bond->slave_cnt++;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond_set_carrier(bond);
+ /* Needs to be called before bond_select_active_slave(), which will
+ * remove the maddrs if the slave is selected as active slave.
+ */
+ bond_slave_ns_maddrs_add(bond, new_slave);
+
if (bond_uses_primary(bond)) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves array to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
-
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2184,7 +2224,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2194,6 +2234,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bpf_prog_inc(bond->xdp_prog);
}
+ bond_xdp_set_features(bond_dev);
+
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
@@ -2232,7 +2274,7 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- slave_dev->flags &= ~IFF_SLAVE;
+ slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
@@ -2242,7 +2284,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
err_restore_mtu:
@@ -2259,9 +2301,7 @@ err_undo_flags:
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
- ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER;
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ bond_ether_setup(bond_dev);
}
}
@@ -2320,7 +2360,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -2334,7 +2374,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2344,7 +2385,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
@@ -2358,6 +2399,12 @@ static int __bond_release_one(struct net_device *bond_dev,
if (oldcurrent == slave)
bond_change_active_slave(bond, NULL);
+ /* Must be called after bond_change_active_slave() as the slave
+ * might change from an active slave to a backup slave. Then it is
+ * necessary to clear the maddrs on the backup slave.
+ */
+ bond_slave_ns_maddrs_del(bond, slave);
+
if (bond_is_lb(bond)) {
/* Must be called only after the slave has been
* detached from the list and the curr_active_slave
@@ -2377,10 +2424,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
@@ -2391,7 +2437,7 @@ static int __bond_release_one(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
@@ -2416,7 +2462,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -2424,23 +2471,29 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
+ slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
+
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
- if (unregister)
- __dev_set_mtu(slave_dev, slave->original_mtu);
- else
+ if (unregister) {
+ netdev_lock_ops(slave_dev);
+ __netif_set_mtu(slave_dev, slave->original_mtu);
+ netdev_unlock_ops(slave_dev);
+ } else {
dev_set_mtu(slave_dev, slave->original_mtu);
+ }
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
+ bond_xdp_set_features(bond_dev);
kobject_put(&slave->kobj);
return 0;
@@ -2502,17 +2555,27 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
+ bool ignore_updelay = false;
int link_state, commit = 0;
struct list_head *iter;
struct slave *slave;
- bool ignore_updelay;
- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ } else {
+ struct bond_up_slave *usable_slaves;
+
+ usable_slaves = rcu_dereference(bond->usable_slaves);
+
+ if (usable_slaves && usable_slaves->count == 0)
+ ignore_updelay = true;
+ }
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = bond_check_dev_link(bond, slave->dev, 0);
+ link_state = netif_running(slave->dev) &&
+ netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -2522,7 +2585,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -2536,9 +2599,10 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2560,7 +2624,7 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.updelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
@@ -2570,9 +2634,10 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2615,8 +2680,11 @@ static void bond_miimon_link_change(struct bonding *bond,
static void bond_miimon_commit(struct bonding *bond)
{
+ struct slave *slave, *primary, *active;
+ bool do_failover = false;
struct list_head *iter;
- struct slave *slave, *primary;
+
+ ASSERT_RTNL();
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
@@ -2660,8 +2728,9 @@ static void bond_miimon_commit(struct bonding *bond)
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
- if (!bond->curr_active_slave || slave == primary)
- goto do_failover;
+ active = rtnl_dereference(bond->curr_active_slave);
+ if (!active || slave == primary || slave->prio > active->prio)
+ do_failover = true;
continue;
@@ -2682,7 +2751,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
if (slave == rcu_access_pointer(bond->curr_active_slave))
- goto do_failover;
+ do_failover = true;
continue;
@@ -2693,8 +2762,9 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
}
+ }
-do_failover:
+ if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
@@ -2714,7 +2784,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2726,30 +2796,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2757,13 +2830,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
@@ -2792,36 +2858,22 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return ret;
}
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
- __be32 src_ip, struct bond_vlan_tag *tags)
+#define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff)
+
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct bond_vlan_tag *outer_tag = tags;
- struct net_device *slave_dev = slave->dev;
struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct bond_vlan_tag *outer_tag = tags;
- slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
- arp_op, &dest_ip, &src_ip);
-
- skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
- NULL, slave_dev->dev_addr, NULL);
-
- if (!skb) {
- net_err_ratelimited("ARP packet allocation failed\n");
- return;
- }
-
- if (!tags || tags->vlan_proto == VLAN_N_VID)
- goto xmit;
+ if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE)
+ return true;
tags++;
/* Go through all the tags backwards and add them to the packet */
- while (tags->vlan_proto != VLAN_N_VID) {
+ while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) {
if (!tags->vlan_id) {
tags++;
continue;
@@ -2833,7 +2885,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
+ return false;
}
tags++;
@@ -2846,8 +2898,37 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
outer_tag->vlan_id);
}
-xmit:
- arp_xmit(skb);
+ return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
+
+ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+ NULL, slave_dev->dev_addr, NULL);
+
+ if (!skb) {
+ net_err_ratelimited("ARP packet allocation failed\n");
+ return;
+ }
+
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ arp_xmit(skb);
+ }
}
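
The tag array consumed by bond_handle_vlan() is terminated by a sentinel entry rather than a stored length: bond_verify_device_path() writes BOND_VLAN_PROTO_NONE into the vlan_proto of the extra element it allocates. A minimal stand-alone sketch of that convention (the struct below mirrors only the two fields used here, and 0xffff is byte-order invariant, so the sentinel needs no conversion):

	#include <stdint.h>
	#include <arpa/inet.h>			/* htons() */

	#define BOND_VLAN_PROTO_NONE	0xffff	/* sentinel, as in the hunk above */
	#define ETH_P_8021Q		0x8100

	struct bond_vlan_tag {			/* illustration-only mirror */
		uint16_t vlan_proto;		/* __be16 in the kernel */
		uint16_t vlan_id;
	};

	/* Walk the array exactly as bond_handle_vlan() does: outermost tag
	 * first, stop at the sentinel instead of carrying an explicit count.
	 */
	static int count_tags(const struct bond_vlan_tag *tags)
	{
		int n = 0;

		while (tags[n].vlan_proto != BOND_VLAN_PROTO_NONE)
			n++;
		return n;
	}

	static const struct bond_vlan_tag example[] = {
		{ htons(ETH_P_8021Q), 100 },	/* outer tag */
		{ htons(ETH_P_8021Q), 200 },	/* inner tag */
		{ BOND_VLAN_PROTO_NONE, 0 },	/* terminator */
	};

	int main(void)
	{
		return count_tags(example) == 2 ? 0 : 1;
	}
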
/* Validate the device path between the @start_dev and the @end_dev.
@@ -2868,7 +2949,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
if (!tags)
return ERR_PTR(-ENOMEM);
- tags[level].vlan_proto = VLAN_N_VID;
+ tags[level].vlan_proto = BOND_VLAN_PROTO_NONE;
return tags;
}
@@ -2903,8 +2984,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
tags = NULL;
/* Find out through which dev should the packet go */
- rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
- RTO_ONLINK, 0);
+ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0,
+ RT_SCOPE_LINK);
if (IS_ERR(rt)) {
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
@@ -2964,30 +3045,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
unsigned int alen;
- if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond) && is_arp) ||
- !slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
- return RX_HANDLER_ANOTHER;
- } else if (!is_arp) {
- return RX_HANDLER_ANOTHER;
- }
-
alen = arp_hdr_len(bond->dev);
- slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
- __func__, skb->dev->name);
-
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
@@ -3048,8 +3116,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3058,6 +3125,234 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct in6_addr mcaddr;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+ daddr, saddr);
+
+ skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+ if (!skb) {
+ net_err_ratelimited("NS packet allocation failed\n");
+ return;
+ }
+
+ addrconf_addr_solict_mult(daddr, &mcaddr);
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ ndisc_send_skb(skb, &mcaddr, saddr);
+ }
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct bond_vlan_tag *tags;
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct flowi6 fl6;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+ __func__, &targets[i]);
+ tags = NULL;
+
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+			/* there's no route to target - send the NS probe
+			 * anyway to generate some traffic (arp_validate=0)
+			 */
+ if (bond->params.arp_validate)
+ pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+ continue;
+ }
+
+ /* bond device itself */
+ if (dst->dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+ rcu_read_unlock();
+
+ if (!IS_ERR_OR_NULL(tags))
+ goto found;
+
+ /* Not our device - skip */
+ slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+ &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+ dst_release(dst);
+ continue;
+
+found:
+ if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+ bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
+ dst_release(dst);
+ kfree(tags);
+ }
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+ return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+ struct netdev_nested_priv priv = {
+ .data = addr,
+ };
+	bool ret = false;
+
+ if (bond_confirm_addr6(bond->dev, &priv))
+ return true;
+
+ rcu_read_lock();
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
+ struct in6_addr *saddr, struct in6_addr *daddr)
+{
+ int i;
+
+ /* Ignore NAs that:
+	 * 1. Source address is the unspecified address.
+	 * 2. Dest address is neither the all-nodes multicast address nor
+	 *    an address configured on the bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+ __func__, saddr, daddr);
+ return;
+ }
+
+ i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+ if (i == -1) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+ __func__, saddr);
+ return;
+ }
+ slave->last_rx = jiffies;
+ slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+ struct slave *curr_active_slave, *curr_arp_slave;
+ struct in6_addr *saddr, *daddr;
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK)
+ goto out;
+
+ combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+ if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+ (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+ combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
+ goto out;
+
+ saddr = &combined->ip6.saddr;
+ daddr = &combined->ip6.daddr;
+
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ saddr, daddr);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+	/* We 'trust' the received NA under the same conditions that
+	 * bond_arp_rcv() applies to ARP replies; see the comments there.
+ */
+ if (bond_is_active_slave(slave))
+ bond_validate_na(bond, slave, saddr, daddr);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_na(bond, slave, daddr, saddr);
+ else if (curr_arp_slave &&
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ bond_validate_na(bond, slave, saddr, daddr);
+
+out:
+ return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+ bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
+
+ /* Use arp validate logic for both ARP and NS */
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+ !slave_do_arp_validate_only(bond))
+ slave->last_rx = jiffies;
+ return RX_HANDLER_ANOTHER;
+ } else if (is_arp) {
+ return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_ipv6) {
+ return bond_na_rcv(skb, bond, slave);
+#endif
+ } else {
+ return RX_HANDLER_ANOTHER;
+ }
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+ bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ bond_ns_send_all(bond, slave);
+#endif
+}
+
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
 * arp_interval/2). The arp_interval/2 slack is needed for really fast networks.
 */
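
Taking the comment above at face value, the helper reduces to a simple window test. A stand-alone model (kernel primitives such as jiffies, msecs_to_jiffies() and the wrap-safe time_in_range() are deliberately replaced by plain integers, so this is only an illustration):

	#include <stdbool.h>

	/* Window test described above: with arp_interval = 1000 ms and
	 * mod = 2, a slave whose last activity was at time t is still
	 * "in interval" for now in [t - 1000 ms, t + 2500 ms].
	 */
	static bool time_in_interval(long now, long last_act, long delta,
				     int mod)
	{
		return now >= last_act - delta &&
		       now <= last_act + mod * delta + delta / 2;
	}
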
@@ -3099,12 +3394,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3129,8 +3424,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->last_rx, 2)) {
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
@@ -3153,7 +3448,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
- bond_arp_send_all(bond, slave);
+ bond_send_validate(bond, slave);
}
rcu_read_unlock();
@@ -3195,7 +3490,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3224,7 +3519,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
/* Backup slave is down if:
* - No current_arp_slave AND
- * - more than 3*delta since last receive AND
+ * - more than (missed_max+1)*delta since last receive AND
* - the bond has an IP address
*
* Note: a non-null current_arp_slave indicates
@@ -3236,20 +3531,20 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
- !bond_time_in_interval(bond, last_rx, 3)) {
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
/* Active slave is down if:
- * - more than 2*delta since transmitting OR
- * - (more than 2*delta since receive AND
+ * - more than missed_max*delta since transmitting OR
+ * - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, last_rx, 2))) {
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
@@ -3265,8 +3560,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
+ bool do_failover = false;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3275,10 +3571,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -3294,8 +3590,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave_info(bond->dev, slave->dev, "link status definitely up\n");
if (!rtnl_dereference(bond->curr_active_slave) ||
- slave == rtnl_dereference(bond->primary_slave))
- goto do_failover;
+ slave == rtnl_dereference(bond->primary_slave) ||
+ slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
+ do_failover = true;
}
@@ -3314,7 +3611,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
if (slave == rtnl_dereference(bond->curr_active_slave)) {
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- goto do_failover;
+ do_failover = true;
}
continue;
@@ -3338,8 +3635,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link_new_state);
continue;
}
+ }
-do_failover:
+ if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
@@ -3367,7 +3665,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
curr_active_slave->dev->name);
if (curr_active_slave) {
- bond_arp_send_all(bond, curr_active_slave);
+ bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
@@ -3419,7 +3717,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_arp_send_all(bond, new_slave);
+ bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
@@ -3475,9 +3773,11 @@ re_arm:
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
@@ -3635,12 +3935,19 @@ static int bond_slave_netdev_event(unsigned long event,
unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
- bond_compute_features(bond);
+ if (!bond->notifier_ctx) {
+ bond->notifier_ctx = true;
+ netdev_compute_master_upper_features(bond->dev, true);
+ bond->notifier_ctx = false;
+ }
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, slave->bond->dev);
break;
+ case NETDEV_XDP_FEAT_CHANGE:
+ bond_xdp_set_features(bond_dev);
+ break;
default:
break;
}
@@ -3695,7 +4002,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
if (likely(n <= hlen))
return data;
else if (skb && likely(pskb_may_pull(skb, n)))
- return skb->head;
+ return skb->data;
return NULL;
}
@@ -3743,7 +4050,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *
}
if (l34 && *ip_proto >= 0)
- fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
+ fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
@@ -3818,14 +4125,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
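
A quick stand-alone model of the fold above shows the effect of the new xmit_policy argument: the kernel's own comment attributes the final shift to the common even-ports pattern, and this change restricts it to the layer3+4 policies so that pure-L3 hashing keeps its full width. The flow_get_u32_{src,dst}() helpers are replaced by sample values here; this is an illustration, not the kernel function:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fold(uint32_t ports, uint32_t src, uint32_t dst,
			     int l34)
	{
		uint32_t hash = ports;

		hash ^= dst ^ src;
		hash ^= hash >> 16;
		hash ^= hash >> 8;
		/* layer3+4 policies drop bit 0, which is biased when both
		 * endpoints use even port numbers; L3-only policies keep it.
		 */
		return l34 ? hash >> 1 : hash;
	}

	int main(void)
	{
		/* two flows between the same hosts, differing only in
		 * (even) source port
		 */
		printf("%u %u\n",
		       fold(0x00500050, 0x0a000001, 0x0a000002, 1),
		       fold(0x00520050, 0x0a000001, 0x0a000002, 1));
		return 0;
	}
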
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -3855,7 +4167,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -3872,8 +4184,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
- return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
- skb->mac_header, skb->network_header,
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+ 0, skb_network_offset(skb),
skb_headlen(skb));
}
@@ -3910,7 +4222,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
@@ -3926,6 +4238,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -3955,7 +4273,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -3963,6 +4281,12 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
+
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -3974,6 +4298,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -3981,6 +4306,23 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ bond->params.broadcast_neighbor)
+ static_branch_dec(&bond_bcast_neigh_enabled);
+
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -4091,7 +4433,6 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
{
struct bonding *bond = netdev_priv(bond_dev);
struct mii_ioctl_data *mii = NULL;
- int res;
netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
@@ -4117,12 +4458,12 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
mii->val_out = BMSR_LSTATUS;
}
- return 0;
+ break;
default:
- res = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- return res;
+ return 0;
}
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
@@ -4345,7 +4686,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
}
}
- bond_dev->mtu = new_mtu;
+ WRITE_ONCE(bond_dev->mtu, new_mtu);
return 0;
@@ -4428,8 +4769,7 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss, NULL);
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
@@ -4490,7 +4830,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
switch (packets_per_slave) {
case 0:
- slave_id = prandom_u32();
+ slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
@@ -4691,19 +5031,7 @@ static void bond_set_slave_arr(struct bonding *bond,
static void bond_reset_slave_arr(struct bonding *bond)
{
- struct bond_up_slave *usable, *all;
-
- usable = rtnl_dereference(bond->usable_slaves);
- if (usable) {
- RCU_INIT_POINTER(bond->usable_slaves, NULL);
- kfree_rcu(usable, rcu);
- }
-
- all = rtnl_dereference(bond->all_slaves);
- if (all) {
- RCU_INIT_POINTER(bond->all_slaves, NULL);
- kfree_rcu(all, rcu);
- }
+ bond_set_slave_arr(bond, NULL, NULL);
}
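
bond_set_slave_arr(bond, NULL, NULL) now carries the pointer teardown that used to be open-coded here (and again in bond_uninit() further down). Judging by the code it replaces, the helper follows the usual RCU replace-then-free idiom, roughly (sketch, not the kernel function):

	old = rtnl_dereference(bond->usable_slaves);
	rcu_assign_pointer(bond->usable_slaves, usable);	/* may be NULL */
	if (old)
		kfree_rcu(old, rcu);	/* freed after a grace period */
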
/* Build the usable slaves array in control path for modes that use xmit-hash
@@ -4817,6 +5145,37 @@ static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
return slaves->arr[hash % count];
}
+static bool bond_should_broadcast_neighbor(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (!static_branch_unlikely(&bond_bcast_neigh_enabled))
+ return false;
+
+ if (!bond->params.broadcast_neighbor)
+ return false;
+
+ if (skb->protocol == htons(ETH_P_ARP))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ combined = skb_header_pointer(skb, skb_mac_header_len(skb),
+ sizeof(_combined),
+ &_combined);
+ if (combined && combined->ip6.nexthdr == NEXTHDR_ICMP &&
+ (combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+ combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT))
+ return true;
+ }
+
+ return false;
+}
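
Both this helper and bond_na_rcv() above rely on the skb_header_pointer() idiom to read a header that may not sit in the skb's linear area: the call returns a pointer into the packet when the requested bytes are already linear, copies them into the caller's stack buffer otherwise, and returns NULL when the packet is simply too short. The idiom in isolation (a sketch assuming kernel context, i.e. <linux/skbuff.h>, <linux/icmpv6.h> and <net/ndisc.h>):

	static bool pkt_is_neigh_icmp6(const struct sk_buff *skb, int offset)
	{
		struct icmp6hdr _hdr;
		const struct icmp6hdr *hdr;

		/* safe even for paged skbs: _hdr is the bounce buffer */
		hdr = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;	/* truncated packet */

		return hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
		       hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT;
	}
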
+
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -4836,32 +5195,56 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
return bond_tx_drop(dev, skb);
}
-/* in broadcast mode, we send everything to all usable interfaces. */
+/* In broadcast mode, we send everything to all slave interfaces, or only
+ * to the usable ones when broadcasting neighbor packets in 802.3ad mode.
+ * Called under rcu_read_lock().
+ */
static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
- struct net_device *bond_dev)
+ struct net_device *bond_dev,
+ bool all_slaves)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
- struct list_head *iter;
+ struct bond_up_slave *slaves;
+ bool xmit_suc = false;
+ bool skb_used = false;
+ int slaves_count, i;
- bond_for_each_slave_rcu(bond, slave, iter) {
- if (bond_is_last_slave(bond, slave))
- break;
- if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (all_slaves)
+ slaves = rcu_dereference(bond->all_slaves);
+ else
+ slaves = rcu_dereference(bond->usable_slaves);
+
+ slaves_count = slaves ? READ_ONCE(slaves->count) : 0;
+ for (i = 0; i < slaves_count; i++) {
+ struct slave *slave = slaves->arr[i];
+ struct sk_buff *skb2;
+
+ if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
+ continue;
+ if (bond_is_last_slave(bond, slave)) {
+ skb2 = skb;
+ skb_used = true;
+ } else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
- bond_dev_queue_xmit(bond, skb2, slave->dev);
}
+
+ if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
+ xmit_suc = true;
}
- if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- return bond_dev_queue_xmit(bond, skb, slave->dev);
- return bond_tx_drop(bond_dev, skb);
+ if (!skb_used)
+ dev_kfree_skb_any(skb);
+
+ if (xmit_suc)
+ return NETDEV_TX_OK;
+
+ dev_core_stats_tx_dropped_inc(bond_dev);
+ return NET_XMIT_DROP;
}
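
The clone discipline in the rewritten loop is worth spelling out: only n-1 clones are taken, the original skb is handed to the last slave, and if no slave consumed it the function frees it exactly once. Reduced to a sketch (illustration only: xmit() stands in for bond_dev_queue_xmit(), and the up/link checks of the real loop are omitted):

	static void broadcast_to(struct sk_buff *skb, struct slave **arr, int n)
	{
		bool skb_used = false;
		int i;

		for (i = 0; i < n; i++) {
			struct sk_buff *skb2 =
				(i == n - 1) ? skb : skb_clone(skb, GFP_ATOMIC);

			if (!skb2)
				continue;		/* clone failed: skip slave */
			if (skb2 == skb)
				skb_used = true;	/* original handed off */
			xmit(skb2, arr[i]->dev);
		}
		if (!skb_used)
			dev_kfree_skb_any(skb);	/* nobody took the original */
	}
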
/*------------------------- Device initialization ---------------------------*/
@@ -4878,7 +5261,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb_get_queue_mapping(skb)) {
+ if (READ_ONCE(slave->queue_id) == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -4961,7 +5344,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
@@ -4999,7 +5382,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
@@ -5039,8 +5422,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
@@ -5054,7 +5443,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
return NETDEV_TX_OK;
#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+ if (tls_is_skb_tx_device_offloaded(skb))
return bond_tls_device_xmit(bond, skb, dev);
#endif
@@ -5064,10 +5453,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
case BOND_MODE_8023AD:
+ if (bond_should_broadcast_neighbor(skb, dev))
+ return bond_xmit_broadcast(skb, dev, false);
+ fallthrough;
case BOND_MODE_XOR:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
- return bond_xmit_broadcast(skb, dev);
+ return bond_xmit_broadcast(skb, dev, true);
case BOND_MODE_ALB:
return bond_alb_xmit(skb, dev);
case BOND_MODE_TLB:
@@ -5124,9 +5516,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
break;
default:
- /* Should never happen. Mode guarded by bond_xdp_check() */
- netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
- WARN_ON_ONCE(1);
+ if (net_ratelimit())
+ netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n",
+ BOND_MODE(bond));
return NULL;
}
@@ -5190,8 +5582,11 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond))
+ if (!bond_xdp_check(bond, BOND_MODE(bond))) {
+ BOND_NL_ERR(dev, extack,
+ "No native XDP support for the current bonding mode");
return -EOPNOTSUPP;
+ }
old_prog = bond->xdp_prog;
bond->xdp_prog = prog;
@@ -5214,7 +5609,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5246,7 +5641,7 @@ err:
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
@@ -5276,6 +5671,67 @@ static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
return speed;
}
+/* Set the BOND_PHC_INDEX flag to notify user space */
+static int bond_set_phc_index_flag(struct kernel_hwtstamp_config *kernel_cfg)
+{
+ struct ifreq *ifr = kernel_cfg->ifr;
+ struct hwtstamp_config cfg;
+
+ if (kernel_cfg->copied_to_user) {
+ /* Lower device has a legacy implementation */
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
+ if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
+ return -EFAULT;
+ } else {
+ kernel_cfg->flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
+ }
+
+ return 0;
+}
+
+static int bond_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *real_dev;
+ int err;
+
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ if (!real_dev)
+ return -EOPNOTSUPP;
+
+ err = generic_hwtstamp_get_lower(real_dev, cfg);
+ if (err)
+ return err;
+
+ return bond_set_phc_index_flag(cfg);
+}
+
+static int bond_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *real_dev;
+ int err;
+
+ if (!(cfg->flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
+ return -EOPNOTSUPP;
+
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ if (!real_dev)
+ return -EOPNOTSUPP;
+
+ err = generic_hwtstamp_set_lower(real_dev, cfg, extack);
+ if (err)
+ return err;
+
+ return bond_set_phc_index_flag(cfg);
+}
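
From user space the flag round-trips through the classic SIOCSHWTSTAMP ioctl; without HWTSTAMP_FLAG_BONDED_PHC_INDEX in cfg.flags, the set path above refuses with -EOPNOTSUPP. A hypothetical caller (error handling trimmed; sock is any open AF_INET datagram socket, and the function name is ours, not a library API):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>		/* struct ifreq, IFNAMSIZ */
	#include <linux/net_tstamp.h>	/* struct hwtstamp_config, flags */
	#include <linux/sockios.h>	/* SIOCSHWTSTAMP */

	int bond_enable_tx_ts(int sock, const char *bond_name)
	{
		struct hwtstamp_config cfg = {
			.flags	 = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
			.tx_type = HWTSTAMP_TX_ON,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, bond_name, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
	}
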
+
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
@@ -5294,6 +5750,7 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
+ bond_update_speed_duplex(slave);
if (slave->speed != SPEED_UNKNOWN) {
if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
speed = bond_mode_bcast_speed(slave,
@@ -5314,15 +5771,57 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
+static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct kernel_ethtool_ts_info ts_info;
+ struct net_device *real_dev;
+ bool sw_tx_support = false;
+ struct list_head *iter;
+ struct slave *slave;
+ int ret = 0;
+
+ rcu_read_lock();
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ dev_hold(real_dev);
+ rcu_read_unlock();
+
+ if (real_dev) {
+ ret = ethtool_get_ts_info_by_layer(real_dev, info);
+ } else {
+ /* Check if all slaves support software tx timestamping */
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ ret = ethtool_get_ts_info_by_layer(slave->dev, &ts_info);
+ if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
+ sw_tx_support = true;
+ continue;
+ }
+
+ sw_tx_support = false;
+ break;
+ }
+ rcu_read_unlock();
+ }
+
+ if (sw_tx_support)
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ dev_put(real_dev);
+ return ret;
+}
+
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = bond_ethtool_get_link_ksettings,
+ .get_ts_info = bond_ethtool_get_ts_info,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -5357,6 +5856,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_bpf = bond_xdp,
.ndo_xdp_xmit = bond_xdp_xmit,
.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
+ .ndo_hwtstamp_get = bond_hwtstamp_get,
+ .ndo_hwtstamp_set = bond_hwtstamp_set,
};
static const struct device_type bond_type = {
@@ -5370,8 +5871,7 @@ static void bond_destructor(struct net_device *bond_dev)
if (bond->wq)
destroy_workqueue(bond->wq);
- if (bond->rr_tx_counter)
- free_percpu(bond->rr_tx_counter);
+ free_percpu(bond->rr_tx_counter);
}
void bond_setup(struct net_device *bond_dev)
@@ -5404,11 +5904,14 @@ void bond_setup(struct net_device *bond_dev)
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
INIT_LIST_HEAD(&bond->ipsec_list);
- spin_lock_init(&bond->ipsec_lock);
+ mutex_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
- bond_dev->features |= NETIF_F_LLTX;
+ bond_dev->lltx = true;
+
+ /* Don't allow bond devices to change network namespaces. */
+ bond_dev->netns_immutable = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
@@ -5417,26 +5920,22 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- /* Don't allow bond devices to change network namespaces. */
- bond_dev->features |= NETIF_F_NETNS_LOCAL;
-
- bond_dev->hw_features = BOND_VLAN_FEATURES |
+ bond_dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- bond_dev->features |= BOND_TLS_FEATURES;
-#endif
}
/* Destroy a bonding device.
@@ -5445,7 +5944,6 @@ void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct bond_up_slave *usable, *all;
struct list_head *iter;
struct slave *slave;
@@ -5456,26 +5954,20 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
- usable = rtnl_dereference(bond->usable_slaves);
- if (usable) {
- RCU_INIT_POINTER(bond->usable_slaves, NULL);
- kfree_rcu(usable, rcu);
- }
+#ifdef CONFIG_XFRM_OFFLOAD
+ mutex_destroy(&bond->ipsec_lock);
+#endif /* CONFIG_XFRM_OFFLOAD */
- all = rtnl_dereference(bond->all_slaves);
- if (all) {
- RCU_INIT_POINTER(bond->all_slaves, NULL);
- kfree_rcu(all, rcu);
- }
+ bond_set_slave_arr(bond, NULL, NULL);
- list_del(&bond->bond_list);
+ list_del_rcu(&bond->bond_list);
bond_debug_unregister(bond);
}
/*------------------------- Module initialization ---------------------------*/
-static int bond_check_params(struct bond_params *params)
+static int __init bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
struct bond_opt_value newval;
@@ -5576,10 +6068,10 @@ static int bond_check_params(struct bond_params *params)
downdelay = 0;
}
- if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
- use_carrier = 1;
+ if (use_carrier != 1) {
+ pr_err("Error: invalid use_carrier parameter (%d)\n",
+ use_carrier);
+ return -EINVAL;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
@@ -5822,10 +6314,10 @@ static int bond_check_params(struct bond_params *params)
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->arp_all_targets = arp_all_targets_value;
+ params->missed_max = 2;
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
- params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
@@ -5841,6 +6333,8 @@ static int bond_check_params(struct bond_params *params)
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
+ params->coupled_control = 1;
+ params->broadcast_neighbor = 0;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -5856,6 +6350,9 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
+#endif
return 0;
}
@@ -5868,23 +6365,17 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
+ bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ bond_dev->name);
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
+ bond->notifier_ctx = false;
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
- list_add_tail(&bond->bond_list, &bn->dev_list);
+ list_add_tail_rcu(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -5912,45 +6403,33 @@ int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
- struct alb_bond_info *bond_info;
- int res;
+ int res = -ENOMEM;
rtnl_lock();
bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name ? name : "bond%d", NET_NAME_UNKNOWN,
bond_setup, tx_queues);
- if (!bond_dev) {
- pr_err("%s: eek! can't alloc netdev!\n", name);
- rtnl_unlock();
- return -ENOMEM;
- }
+ if (!bond_dev)
+ goto out;
- /*
- * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
- * It is set to 0 by default which is wrong.
- */
bond = netdev_priv(bond_dev);
- bond_info = &(BOND_ALB_INFO(bond));
- bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
-
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
- rtnl_unlock();
-
- return res;
+ goto out;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
+out:
rtnl_unlock();
- return 0;
+ return res;
}
static int __net_init bond_net_init(struct net *net)
@@ -5966,27 +6445,48 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit(struct net *net)
+/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
+ * race condition in bond unloading") we need to remove sysfs files
+ * before we remove our devices (done later in bond_net_exit_rtnl())
+ */
+static void __net_exit bond_net_pre_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
- LIST_HEAD(list);
bond_destroy_sysfs(bn);
+}
+
+static void __net_exit bond_net_exit_rtnl(struct net *net,
+ struct list_head *dev_kill_list)
+{
+ struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
+}
+
+/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
+ * only after all bonds are gone") bond_destroy_proc_dir() is called
+ * after bond_net_exit_rtnl() has completed.
+ */
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+{
+ struct bond_net *bn;
+ struct net *net;
- bond_destroy_proc_dir(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_proc_dir(bn);
+ }
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
- .exit = bond_net_exit,
+ .pre_exit = bond_net_pre_exit,
+ .exit_rtnl = bond_net_exit_rtnl,
+ .exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
@@ -6000,16 +6500,16 @@ static int __init bonding_init(void)
if (res)
goto out;
+ bond_create_debugfs();
+
res = register_pernet_subsys(&bond_net_ops);
if (res)
- goto out;
+ goto err_net_ops;
res = bond_netlink_init();
if (res)
goto err_link;
- bond_create_debugfs();
-
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -6024,10 +6524,11 @@ static int __init bonding_init(void)
out:
return res;
err:
- bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
+err_net_ops:
+ bond_destroy_debugfs();
goto out;
}
@@ -6036,11 +6537,11 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
- bond_destroy_debugfs();
-
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
+ bond_destroy_debugfs();
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
@@ -6052,3 +6553,4 @@ module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 5d54e11d18fa..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -14,6 +14,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
+#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
@@ -26,6 +27,8 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
+ nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_ACTOR_PORT_PRIO */
0;
}
@@ -49,7 +52,11 @@ static int bond_fill_slave_info(struct sk_buff *skb,
slave_dev->addr_len, slave->perm_hwaddr))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID,
+ READ_ONCE(slave->queue_id)))
+ goto nla_put_failure;
+
+ if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
goto nla_put_failure;
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
@@ -71,6 +78,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ SLAVE_AD_INFO(slave)->port_priority))
+ goto nla_put_failure;
}
return 0;
@@ -79,6 +90,11 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* Limit the max delay range to 300s */
+static const struct netlink_range_validation delay_range = {
+ .max = 300000,
+};
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@@ -109,11 +125,17 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
- [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
+ [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ [IFLA_BOND_BROADCAST_NEIGH] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
+ [IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
+ [IFLA_BOND_SLAVE_ACTOR_PORT_PRIO] = { .type = NLA_U16 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -148,7 +170,28 @@ static int bond_slave_changelink(struct net_device *bond_dev,
snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
slave_dev->name, queue_id);
bond_opt_initstr(&newval, queue_id_str);
- err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval,
+ data[IFLA_BOND_SLAVE_QUEUE_ID], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_SLAVE_PRIO]) {
+ int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, prio);
+ err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_PRIO], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]) {
+ u16 ad_prio = nla_get_u16(data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, ad_prio);
+ err = __bond_opt_set(bond, BOND_OPT_ACTOR_PORT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO], extack);
if (err)
return err;
}
@@ -172,7 +215,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
bond_opt_initval(&newval, mode);
- err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval,
+ data[IFLA_BOND_MODE], extack);
if (err)
return err;
}
@@ -189,7 +233,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
active_slave = slave_dev->name;
}
bond_opt_initstr(&newval, active_slave);
- err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval,
+ data[IFLA_BOND_ACTIVE_SLAVE], extack);
if (err)
return err;
}
@@ -197,7 +242,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
bond_opt_initval(&newval, miimon);
- err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
+ data[IFLA_BOND_MIIMON], extack);
if (err)
return err;
}
@@ -205,7 +251,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
bond_opt_initval(&newval, updelay);
- err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval,
+ data[IFLA_BOND_UPDELAY], extack);
if (err)
return err;
}
@@ -213,7 +260,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
bond_opt_initval(&newval, downdelay);
- err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval,
+ data[IFLA_BOND_DOWNDELAY], extack);
if (err)
return err;
}
@@ -221,28 +269,30 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
bond_opt_initval(&newval, delay);
- err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval,
+ data[IFLA_BOND_PEER_NOTIF_DELAY], extack);
if (err)
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
- int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
-
- bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
- if (err)
- return err;
+ if (nla_get_u8(data[IFLA_BOND_USE_CARRIER]) != 1) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_USE_CARRIER],
+ "option obsolete, use_carrier cannot be disabled");
+ return -EINVAL;
+ }
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP monitoring cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_interval);
- err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval,
+ data[IFLA_BOND_ARP_INTERVAL], extack);
if (err)
return err;
}
@@ -261,7 +311,9 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
- &newval);
+ &newval,
+ data[IFLA_BOND_ARP_IP_TARGET],
+ extack);
if (err)
break;
i++;
@@ -271,16 +323,49 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_BOND_NS_IP6_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_ns_ip6_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
+ struct in6_addr addr6;
+
+ if (nla_len(attr) < sizeof(addr6)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
+ return -EINVAL;
+ }
+
+ addr6 = nla_get_in6_addr(attr);
+
+ bond_opt_initextra(&newval, &addr6, sizeof(addr6));
+ err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
+ &newval,
+ data[IFLA_BOND_NS_IP6_TARGET],
+ extack);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
+ if (err)
+ return err;
+ }
+#endif
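/* Editorial sketch of the nested attribute layout consumed above
 * (derived from the parse loop, not new uapi):
 *
 *   IFLA_BOND_NS_IP6_TARGET (nested)
 *     attr[0]: struct in6_addr  -- first NS target
 *     attr[1]: struct in6_addr  -- second NS target
 *     ...
 *
 * Each nested payload must be at least sizeof(struct in6_addr) (16
 * bytes); shorter attributes fail with -EINVAL before any target is set.
 */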
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_VALIDATE],
+ "ARP validating cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_validate);
- err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval,
+ data[IFLA_BOND_ARP_VALIDATE], extack);
if (err)
return err;
}
@@ -289,7 +374,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
bond_opt_initval(&newval, arp_all_targets);
- err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval,
+ data[IFLA_BOND_ARP_ALL_TARGETS], extack);
if (err)
return err;
}
@@ -303,7 +389,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
primary = dev->name;
bond_opt_initstr(&newval, primary);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval,
+ data[IFLA_BOND_PRIMARY], extack);
if (err)
return err;
}
@@ -312,7 +399,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
bond_opt_initval(&newval, primary_reselect);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval,
+ data[IFLA_BOND_PRIMARY_RESELECT], extack);
if (err)
return err;
}
@@ -321,7 +409,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
bond_opt_initval(&newval, fail_over_mac);
- err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval,
+ data[IFLA_BOND_FAIL_OVER_MAC], extack);
if (err)
return err;
}
@@ -330,7 +419,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
bond_opt_initval(&newval, xmit_hash_policy);
- err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval,
+ data[IFLA_BOND_XMIT_HASH_POLICY], extack);
if (err)
return err;
}
@@ -339,7 +429,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
bond_opt_initval(&newval, resend_igmp);
- err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval,
+ data[IFLA_BOND_RESEND_IGMP], extack);
if (err)
return err;
}
@@ -348,7 +439,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
bond_opt_initval(&newval, num_peer_notif);
- err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval,
+ data[IFLA_BOND_NUM_PEER_NOTIF], extack);
if (err)
return err;
}
@@ -357,7 +449,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
bond_opt_initval(&newval, all_slaves_active);
- err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval,
+ data[IFLA_BOND_ALL_SLAVES_ACTIVE], extack);
if (err)
return err;
}
@@ -366,7 +459,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
bond_opt_initval(&newval, min_links);
- err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval,
+ data[IFLA_BOND_MIN_LINKS], extack);
if (err)
return err;
}
@@ -375,7 +469,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
bond_opt_initval(&newval, lp_interval);
- err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval,
+ data[IFLA_BOND_LP_INTERVAL], extack);
if (err)
return err;
}
@@ -384,7 +479,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
bond_opt_initval(&newval, packets_per_slave);
- err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval,
+ data[IFLA_BOND_PACKETS_PER_SLAVE], extack);
if (err)
return err;
}
@@ -393,7 +489,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
bond_opt_initval(&newval, lacp_active);
- err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval,
+ data[IFLA_BOND_AD_LACP_ACTIVE], extack);
if (err)
return err;
}
@@ -403,7 +500,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
bond_opt_initval(&newval, lacp_rate);
- err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval,
+ data[IFLA_BOND_AD_LACP_RATE], extack);
if (err)
return err;
}
@@ -412,7 +510,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_SELECT]);
bond_opt_initval(&newval, ad_select);
- err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval,
+ data[IFLA_BOND_AD_SELECT], extack);
if (err)
return err;
}
@@ -421,7 +520,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
bond_opt_initval(&newval, actor_sys_prio);
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYS_PRIO], extack);
if (err)
return err;
}
@@ -430,7 +530,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
bond_opt_initval(&newval, port_key);
- err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval,
+ data[IFLA_BOND_AD_USER_PORT_KEY], extack);
if (err)
return err;
}
@@ -440,7 +541,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval,
nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYSTEM], extack);
if (err)
return err;
}
@@ -448,7 +550,38 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
bond_opt_initval(&newval, dynamic_lb);
- err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval,
+ data[IFLA_BOND_TLB_DYNAMIC_LB], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_MISSED_MAX]) {
+ int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
+
+ bond_opt_initval(&newval, missed_max);
+ err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval,
+ data[IFLA_BOND_MISSED_MAX], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_COUPLED_CONTROL]) {
+ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
+
+ bond_opt_initval(&newval, coupled_control);
+ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
+ data[IFLA_BOND_COUPLED_CONTROL], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_BROADCAST_NEIGH]) {
+ int broadcast_neigh = nla_get_u8(data[IFLA_BOND_BROADCAST_NEIGH]);
+
+ bond_opt_initval(&newval, broadcast_neigh);
+ err = __bond_opt_set(bond, BOND_OPT_BROADCAST_NEIGH, &newval,
+ data[IFLA_BOND_BROADCAST_NEIGH], extack);
if (err)
return err;
}
@@ -456,22 +589,26 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return 0;
}
-static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bond_newlink(struct net_device *bond_dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
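/* Editorial note on the reordering above: register_netdevice() and the
 * work initialisation now run before bond_changelink(), so options are
 * applied to a fully registered device; if an option is rejected, the
 * error path cancels all work items and unregisters the device before
 * propagating the bond_changelink() error to the newlink caller.
 */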
@@ -515,6 +652,12 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_BROADCAST_NEIGH */
0;
}
@@ -561,7 +704,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, 1))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
@@ -592,6 +735,26 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.arp_all_targets))
goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
+ if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
+ goto nla_put_failure;
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+#endif
+
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
@@ -650,6 +813,18 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.tlb_dynamic_lb))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
+ bond->params.missed_max))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
+ bond->params.coupled_control))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_BOND_BROADCAST_NEIGH,
+ bond->params.broadcast_neighbor))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a8fde3bc458f..384499c869b8 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <net/bonding.h>
+#include <net/ndisc.h>
static int bond_option_active_slave_set(struct bonding *bond,
const struct bond_opt_value *newval);
@@ -34,10 +35,14 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
@@ -74,11 +79,18 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
-
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -150,10 +162,11 @@ static const struct bond_opt_value bond_lacp_rate_tbl[] = {
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
- { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
- { "bandwidth", BOND_AD_BANDWIDTH, 0},
- { "count", BOND_AD_COUNT, 0},
- { NULL, -1, 0},
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { "actor_port_prio", BOND_AD_PRIO, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
@@ -163,6 +176,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 300000, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
+};
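/* Editorial note: peer_notif_delay previously shared bond_intmax_tbl;
 * the dedicated table above bounds it to 0..300000 ms (at most five
 * minutes between peer notifications) instead of INT_MAX.
 */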
+
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@@ -171,7 +190,6 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
- { "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
@@ -213,6 +231,25 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_missed_max_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 2, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
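/* Editorial note: arp_missed_max bounds how many consecutive
 * arp_interval windows may elapse without a validated reply before a
 * slave is marked down, so failure detection takes roughly
 * arp_interval * arp_missed_max milliseconds (default 2, range 1-255).
 */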
+
+static const struct bond_opt_value bond_coupled_control_tbl[] = {
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { "off", 0, 0},
+ { NULL, -1, 0},
+};
+
+static const struct bond_opt_value bond_broadcast_neigh_tbl[] = {
+ { "off", 0, BOND_VALFLAG_DEFAULT},
+ { "on", 1, 0},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -270,6 +307,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_arp_interval_set
},
+ [BOND_OPT_MISSED_MAX] = {
+ .id = BOND_OPT_MISSED_MAX,
+ .name = "arp_missed_max",
+ .desc = "Maximum number of missed ARP interval",
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
+ .values = bond_missed_max_tbl,
+ .set = bond_option_missed_max_set
+ },
[BOND_OPT_ARP_TARGETS] = {
.id = BOND_OPT_ARP_TARGETS,
.name = "arp_ip_target",
@@ -277,6 +323,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
+ [BOND_OPT_NS_TARGETS] = {
+ .id = BOND_OPT_NS_TARGETS,
+ .name = "ns_ip6_target",
+ .desc = "NS targets in ffff:ffff::ffff:ffff form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_ns_ip6_targets_set
+ },
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -319,7 +372,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
- .desc = "803.ad aggregation selection logic",
+ .desc = "802.3ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
@@ -338,6 +391,16 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
+ [BOND_OPT_PRIO] = {
+ .id = BOND_OPT_PRIO,
+ .name = "prio",
+ .desc = "Link priority for failover re-selection",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_prio_set
+ },
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
@@ -358,7 +421,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
- .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .desc = "option obsolete, use_carrier cannot be disabled",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
@@ -423,6 +486,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
+ [BOND_OPT_ACTOR_PORT_PRIO] = {
+ .id = BOND_OPT_ACTOR_PORT_PRIO,
+ .name = "actor_port_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_actor_port_prio_set,
+ },
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
@@ -449,8 +519,25 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
- .values = bond_intmax_tbl,
+ .values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
+ },
+ [BOND_OPT_COUPLED_CONTROL] = {
+ .id = BOND_OPT_COUPLED_CONTROL,
+ .name = "coupled_control",
+ .desc = "Opt into using coupled control MUX for LACP states",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_coupled_control_tbl,
+ .set = bond_option_coupled_control_set,
+ },
+ [BOND_OPT_BROADCAST_NEIGH] = {
+ .id = BOND_OPT_BROADCAST_NEIGH,
+ .name = "broadcast_neighbor",
+ .desc = "Broadcast neighbor packets to all active slaves",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_broadcast_neigh_tbl,
+ .set = bond_option_broadcast_neigh_set,
}
};
@@ -605,27 +692,35 @@ static int bond_opt_check_deps(struct bonding *bond,
}
static void bond_opt_dep_print(struct bonding *bond,
- const struct bond_option *opt)
+ const struct bond_option *opt,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
- if (test_bit(params->mode, &opt->unsuppmodes))
+ if (test_bit(params->mode, &opt->unsuppmodes)) {
netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
opt->name, modeval->string, modeval->value);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "option not supported in mode");
+ }
}
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, const struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
case -EINVAL:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr, "invalid option value");
if (val) {
if (val->string) {
/* sometimes RAWVAL opts may have new lines */
@@ -647,13 +742,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
opt->name, minval ? minval->value : 0, maxval->value);
break;
case -EACCES:
- bond_opt_dep_print(bond, opt);
+ bond_opt_dep_print(bond, opt, bad_attr, extack);
break;
case -ENOTEMPTY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond device has slaves");
netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
opt->name);
break;
case -EBUSY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond is up");
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
@@ -664,6 +763,8 @@ static void bond_opt_error_interpret(struct bonding *bond,
*p = '\0';
netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
opt->name, val->string);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "interface does not exist");
}
break;
default:
@@ -676,13 +777,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
* @bond: target bond device
* @option: option to set
* @val: value to set it to
+ * @bad_attr: netlink attribute that caused the error
+ * @extack: extended netlink error structure, used when an error message
+ * needs to be returned to the caller via netlink
*
* This function is used to change the bond's option value, it can be
* used for both enabling/changing an option and for disabling it. RTNL lock
* must be obtained before calling this function.
*/
int __bond_opt_set(struct bonding *bond,
- unsigned int option, struct bond_opt_value *val)
+ unsigned int option, struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack)
{
const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
@@ -704,7 +809,7 @@ int __bond_opt_set(struct bonding *bond,
ret = opt->set(bond, retval);
out:
if (ret)
- bond_opt_error_interpret(bond, opt, ret, val);
+ bond_opt_error_interpret(bond, opt, ret, val, bad_attr, extack);
return ret;
}
@@ -726,7 +831,7 @@ int __bond_opt_set_notify(struct bonding *bond,
ASSERT_RTNL();
- ret = __bond_opt_set(bond, option, val);
+ ret = __bond_opt_set(bond, option, val, NULL, NULL);
if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
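/* Editorial note: callers without a netlink context, such as the sysfs
 * store path that funnels through __bond_opt_set_notify() above, pass
 * NULL for both bad_attr and extack. NL_SET_ERR_MSG_ATTR() is a no-op
 * when extack is NULL, so those callers still get only the netdev_err()
 * logging from bond_opt_error_interpret().
 */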
@@ -785,22 +890,12 @@ static bool bond_set_xfrm_features(struct bonding *bond)
return true;
}
-static bool bond_set_tls_features(struct bonding *bond)
-{
- if (!IS_ENABLED(CONFIG_TLS_DEVICE))
- return false;
-
- if (bond_sk_check(bond))
- bond->dev->wanted_features |= BOND_TLS_FEATURES;
- else
- bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
-
- return true;
-}
-
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ if (bond->xdp_prog && !bond_xdp_check(bond, newval->value))
+ return -EOPNOTSUPP;
+
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -824,16 +919,24 @@ static int bond_option_mode_set(struct bonding *bond,
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
bond->params.mode = newval->value;
+ /* When changing mode, the bond device is down, we may reduce
+ * the bond_bcast_neigh_enabled in bond_close() if broadcast_neighbor
+ * enabled in 8023ad mode. Therefore, only clear broadcast_neighbor
+ * to 0.
+ */
+ bond->params.broadcast_neighbor = 0;
+
if (bond->dev->reg_state == NETREG_REGISTERED) {
bool update = false;
update |= bond_set_xfrm_features(bond);
- update |= bond_set_tls_features(bond);
if (update)
netdev_update_features(bond->dev);
}
+ bond_xdp_set_features(bond->dev);
+
return 0;
}
@@ -869,7 +972,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_dbg(bond->dev, "Clearing current active slave\n");
- RCU_INIT_POINTER(bond->curr_active_slave, NULL);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
@@ -997,10 +1100,6 @@ static int bond_option_peer_notif_delay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
- bond->params.use_carrier = newval->value;
-
return 0;
}
@@ -1034,7 +1133,7 @@ static int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1147,9 +1246,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
__be32 target;
if (newval->string) {
- if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
- netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
- &target);
+ if (strlen(newval->string) < 1 ||
+ !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
+ netdev_err(bond->dev, "invalid ARP target specified\n");
return ret;
}
if (newval->string[0] == '+')
@@ -1166,13 +1265,196 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ !bond_is_active_slave(slave) &&
+ slave->dev->flags & IFF_MULTICAST;
+}
+
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function tries to add or delete all the NS mac addresses on the slave
+ *
+ * Note, the IPv6 NS target address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message is
+ * the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that solicited-node multicast address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * enslaving, releasing new slaves
+ */
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
+ int i;
+
+ if (!slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (ipv6_addr_any(&targets[i]))
+ break;
+
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
+ if (add)
+ dev_mc_add(slave->dev, slot_maddr);
+ else
+ dev_mc_del(slave->dev, slot_maddr);
+ }
+ }
+}
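/* Worked example (editorial, not part of the patch): for the NS target
 * 2001:db8::1, addrconf_addr_solict_mult() yields the solicited-node
 * multicast address ff02::1:ff00:1 (RFC 4291), and ndisc_mc_map() maps
 * that to the multicast MAC 33:33:ff:00:00:01 (RFC 2464), which is the
 * address added to or removed from the slave's multicast filter above.
 */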
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, true);
+}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
+{
+ if (!bond->params.arp_validate)
+ return;
+ slave_set_ns_maddrs(bond, slave, false);
+}
+
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function tries to replace the old mac address with the new one on the slave.
+ *
+ * Note, the target/slot IPv6 address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message is
+ * the solicited-node multicast address of the target, and the destination
+ * MAC of the NS message is derived from that solicited-node multicast address.
+ *
+ * This function is called when
+ * * An IPv6 NS target is added or removed.
+ */
+static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
+ struct in6_addr *target, struct in6_addr *slot)
+{
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
+
+ if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
+ return;
+
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
+ if (!ipv6_addr_any(slot) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
+
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
+ if (!ipv6_addr_any(target) &&
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
+}
+
+static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+ struct in6_addr *target,
+ unsigned long last_rx)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+ bond_for_each_slave(bond, slave, iter) {
+ slave->target_last_arp_rx[slot] = last_rx;
+ slave_set_ns_maddr(bond, slave, target, &targets[slot]);
+ }
+ targets[slot] = *target;
+ }
+}
+
+void bond_option_ns_ip6_targets_clear(struct bonding *bond)
+{
+ struct in6_addr addr_any = in6addr_any;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
+}
+
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct in6_addr *target = (struct in6_addr *)newval->extra;
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct in6_addr addr_any = in6addr_any;
+ int index;
+
+ if (!bond_is_ip6_target_ok(target)) {
+ netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
+ target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
+ netdev_err(bond->dev, "NS target %pI6c is already present\n",
+ target);
+ return -EINVAL;
+ }
+
+ index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
+ if (index == -1) {
+ netdev_err(bond->dev, "NS target table is full!\n");
+ return -EINVAL;
+ }
+
+ netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
+
+ _bond_options_ns_ip6_target_set(bond, index, target, jiffies);
+
+ return 0;
+}
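/* Usage sketch (assuming iproute2 grew a matching keyword for the new
 * option; syntax illustrative only):
 *
 *   # ip link set bond0 type bond ns_ip6_target 2001:db8::1
 *
 * The address arrives here through the nested IFLA_BOND_NS_IP6_TARGET
 * attribute parsed in bond_changelink(); duplicates and a full table
 * (BOND_MAX_NS_TARGETS entries) are rejected with -EINVAL.
 */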
+#else
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ return -EPERM;
+}
+
+static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {}
+
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {}
+
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {}
+#endif
+
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ bool changed = !!bond->params.arp_validate != !!newval->value;
+ struct list_head *iter;
+ struct slave *slave;
+
netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
bond->params.arp_validate = newval->value;
+ if (changed) {
+ bond_for_each_slave(bond, slave, iter)
+ slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate);
+ }
+
return 0;
}
@@ -1186,6 +1468,37 @@ static int bond_option_arp_all_targets_set(struct bonding *bond,
return 0;
}
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.missed_max = newval->value;
+
+ return 0;
+}
+
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+ slave->prio = newval->value;
+
+ if (rtnl_dereference(bond->primary_slave))
+ slave_warn(bond->dev, slave->dev,
+ "prio updated, but will not affect failover re-selection as primary slave have been set\n");
+ else
+ bond_select_active_slave(bond);
+
+ return 0;
+}
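/* Usage sketch (assuming iproute2 slave-option syntax; illustrative):
 *
 *   # ip link set eth0 type bond_slave prio 10
 *
 * prio is stored per slave; the highest-prio up slave wins re-selection
 * on failover, but only while no primary slave is configured, as the
 * slave_warn() above points out.
 */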
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1265,10 +1578,6 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- if (bond_set_tls_features(bond))
- netdev_update_features(bond->dev);
-
return 0;
}
@@ -1356,6 +1665,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
+ bond_3ad_update_lacp_active(bond);
return 0;
}
@@ -1430,7 +1740,7 @@ static int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ WRITE_ONCE(update_slave->queue_id, qid);
out:
return ret;
@@ -1512,6 +1822,26 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
return 0;
}
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(bond->dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+
+ netdev_dbg(newval->slave_dev, "Setting actor_port_prio to %llu\n",
+ newval->value);
+
+ SLAVE_AD_INFO(slave)->port_priority = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
+ return 0;
+}
+
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1526,7 +1856,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
mac = (u8 *)&newval->value;
}
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
goto err;
netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
@@ -1549,3 +1879,32 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
bond->params.ad_user_port_key = newval->value;
return 0;
}
+
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
+ newval->string, newval->value);
+
+ bond->params.coupled_control = newval->value;
+ return 0;
+}
+
+static int bond_option_broadcast_neigh_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ if (bond->params.broadcast_neighbor == newval->value)
+ return 0;
+
+ bond->params.broadcast_neighbor = newval->value;
+ if (bond->dev->flags & IFF_UP) {
+ if (bond->params.broadcast_neighbor)
+ static_branch_inc(&bond_bcast_neigh_enabled);
+ else
+ static_branch_dec(&bond_bcast_neigh_enabled);
+ }
+
+ netdev_dbg(bond->dev, "Setting broadcast_neighbor to %s (%llu)\n",
+ newval->string, newval->value);
+ return 0;
+}
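/* Editorial sketch of how the static key is meant to gate the hot path
 * (the actual consumer lives outside this hunk; pseudocode only):
 *
 *   if (static_branch_unlikely(&bond_bcast_neigh_enabled) &&
 *       bond->params.broadcast_neighbor)
 *           ...broadcast the neighbor packet to all active slaves...
 *
 * Pairing static_branch_inc()/dec() with IFF_UP keeps the branch patched
 * out whenever no running bond has broadcast_neighbor enabled.
 */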
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f3e3bfd72556..7edf72ec816a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
+ seq_printf(seq, "ARP Missed Max: %u\n",
+ bond->params.missed_max);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
@@ -127,6 +129,21 @@ static void bond_info_show_master(struct seq_file *seq)
printed = 1;
}
seq_printf(seq, "\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+ printed = 0;
+ seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
+
+ for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
+ if (ipv6_addr_any(&bond->params.ns_targets[i]))
+ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
+ printed = 1;
+ }
+ seq_printf(seq, "\n");
+#endif
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -173,7 +190,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
@@ -192,7 +209,7 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %*phC\n",
slave->dev->addr_len, slave->perm_hwaddr);
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+ seq_printf(seq, "Slave queue ID: %d\n", READ_ONCE(slave->queue_id));
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct port *port = &SLAVE_AD_INFO(slave)->port;
@@ -305,7 +322,6 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
}
/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c48b77167fab..9a75ad3181ab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -31,35 +31,35 @@
/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
-static ssize_t bonding_show_bonds(struct class *cls,
- struct class_attribute *attr,
+static ssize_t bonding_show_bonds(const struct class *cls,
+ const struct class_attribute *attr,
char *buf)
{
- struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
- int res = 0;
+ const struct bond_net *bn =
+ container_of_const(attr, struct bond_net, class_attr_bonding_masters);
struct bonding *bond;
+ int res = 0;
- rtnl_lock();
+ rcu_read_lock();
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
+ list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
-static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifname)
+static struct net_device *bond_get_by_name(const struct bond_net *bn, const char *ifname)
{
struct bonding *bond;
@@ -75,12 +75,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
*
* The class parameter is ignored.
*/
-static ssize_t bonding_store_bonds(struct class *cls,
- struct class_attribute *attr,
+static ssize_t bonding_store_bonds(const struct class *cls,
+ const struct class_attribute *attr,
const char *buffer, size_t count)
{
- struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
+ const struct bond_net *bn =
+ container_of_const(attr, struct bond_net, class_attr_bonding_masters);
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
@@ -170,21 +170,20 @@ static ssize_t bonding_show_slaves(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
- rtnl_unlock();
+ rcu_read_unlock();
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -203,7 +202,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +216,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +232,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +247,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +264,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +276,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +291,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -303,6 +302,18 @@ static ssize_t bonding_show_arp_targets(struct device *d,
static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
+/* Show the arp missed max. */
+static ssize_t bonding_show_missed_max(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
+}
+static DEVICE_ATTR(arp_missed_max, 0644,
+ bonding_show_missed_max, bonding_sysfs_store_option);
+
/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
@@ -310,7 +321,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -321,7 +332,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -333,8 +344,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -349,7 +360,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -363,7 +374,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -374,7 +385,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -388,7 +399,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -400,7 +411,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -414,7 +425,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -431,7 +442,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -450,20 +461,18 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
-/* Show the use_carrier flag. */
+/* use_carrier is obsolete, but print value for compatibility */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct bonding *bond = to_bond(d);
-
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -481,7 +490,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -497,7 +506,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -512,9 +521,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -533,9 +542,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -554,9 +563,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -575,9 +584,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -597,7 +606,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -614,24 +623,24 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name,
+ READ_ONCE(slave->queue_id));
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
@@ -646,7 +655,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -658,7 +667,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -670,7 +679,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -681,7 +690,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -693,7 +702,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -705,7 +714,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -719,7 +728,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -734,7 +743,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
@@ -779,6 +788,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_actor_sys_prio.attr,
&dev_attr_ad_actor_system.attr,
&dev_attr_ad_user_port_key.attr,
+ &dev_attr_arp_missed_max.attr,
NULL,
};
@@ -790,7 +800,7 @@ static const struct attribute_group bonding_group = {
/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
-int bond_create_sysfs(struct bond_net *bn)
+int __net_init bond_create_sysfs(struct bond_net *bn)
{
int ret;
@@ -823,7 +833,7 @@ int bond_create_sysfs(struct bond_net *bn)
}
/* Remove /sys/class/net/bonding_masters. */
-void bond_destroy_sysfs(struct bond_net *bn)
+void __net_exit bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index fd07561da034..36d0e8440b5b 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -15,43 +15,37 @@ struct slave_attribute {
ssize_t (*show)(struct slave *, char *);
};
-#define SLAVE_ATTR(_name, _mode, _show) \
-const struct slave_attribute slave_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
-};
#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -59,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", READ_ONCE(slave->queue_id));
}
static SLAVE_ATTR_RO(queue_id);
@@ -70,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -85,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -100,23 +94,23 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
-static const struct slave_attribute *slave_attrs[] = {
- &slave_attr_state,
- &slave_attr_mii_status,
- &slave_attr_link_failure_count,
- &slave_attr_perm_hwaddr,
- &slave_attr_queue_id,
- &slave_attr_ad_aggregator_id,
- &slave_attr_ad_actor_oper_port_state,
- &slave_attr_ad_partner_oper_port_state,
+static const struct attribute *slave_attrs[] = {
+ &slave_attr_state.attr,
+ &slave_attr_mii_status.attr,
+ &slave_attr_link_failure_count.attr,
+ &slave_attr_perm_hwaddr.attr,
+ &slave_attr_queue_id.attr,
+ &slave_attr_ad_aggregator_id.attr,
+ &slave_attr_ad_actor_oper_port_state.attr,
+ &slave_attr_ad_partner_oper_port_state.attr,
NULL
};
@@ -137,24 +131,10 @@ const struct sysfs_ops slave_sysfs_ops = {
int bond_sysfs_slave_add(struct slave *slave)
{
- const struct slave_attribute **a;
- int err;
-
- for (a = slave_attrs; *a; ++a) {
- err = sysfs_create_file(&slave->kobj, &((*a)->attr));
- if (err) {
- kobject_put(&slave->kobj);
- return err;
- }
- }
-
- return 0;
+ return sysfs_create_files(&slave->kobj, slave_attrs);
}
void bond_sysfs_slave_del(struct slave *slave)
{
- const struct slave_attribute **a;
-
- for (a = slave_attrs; *a; ++a)
- sysfs_remove_file(&slave->kobj, &((*a)->attr));
+ sysfs_remove_files(&slave->kobj, slave_attrs);
}
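A hedged sketch of the helper-based pattern this file switches to: sysfs_create_files() walks a NULL-terminated attribute array and, on failure, removes the files it had already created, which is what the removed loop did by hand. The old loop's error-path kobject_put() disappears here, so that reference drop is assumed to be the caller's job now.

static const struct attribute *example_attrs[] = {
	&slave_attr_state.attr,
	&slave_attr_mii_status.attr,
	NULL,	/* terminator required by sysfs_create_files() */
};

static int example_add(struct kobject *kobj)
{
	/* all-or-nothing: on error, already-created files are removed */
	return sysfs_create_files(kobj, example_attrs);
}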
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
index 48cdf3a49a7d..fef6288c6944 100644
--- a/drivers/net/bonding/bonding_priv.h
+++ b/drivers/net/bonding/bonding_priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
*
@@ -7,9 +8,6 @@
* BUT, I'm the one who modified it for ethernet, so:
* (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
*
- * This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
- *
*/
#ifndef _BONDING_PRIV_H
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 2a7af611d43a..c398ac42eae9 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
- if (size > sizeof(ser->tx_data))
- size = sizeof(ser->tx_data);
- memcpy(ser->tx_data, data, size);
- ser->tx_blob.data = ser->tx_data;
- ser->tx_blob.size = size;
-}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -151,15 +142,10 @@ static inline void update_tty_status(struct ser_device *ser)
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
-}
-
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
- const char *flags, int count)
+ const u8 *flags, size_t count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -196,7 +182,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb_reset_mac_header(skb);
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
+ ret = netif_rx(skb);
if (!ret) {
ser->dev->stats.rx_packets++;
ser->dev->stats.rx_bytes += count;
@@ -344,7 +330,7 @@ static int ldisc_open(struct tty_struct *tty)
ser->tty = tty_kref_get(tty);
ser->dev = dev;
debugfs_init(ser, tty);
- tty->receive_room = N_TTY_BUF_SIZE;
+ tty->receive_room = 4096;
tty->disc_data = ser;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
rtnl_lock();
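Two independent API moves meet in this file: the line-discipline receive hook now takes const u8 flags and a size_t count, and netif_rx_ni() is gone because netif_rx() nowadays disables bottom halves itself and is safe from process context. A compressed sketch of the receive path under those assumptions (example_dev is a hypothetical stand-in for the driver's net_device):

static struct net_device *example_dev;	/* hypothetical */

static void example_ldisc_receive(struct tty_struct *tty, const u8 *data,
				  const u8 *flags, size_t count)
{
	struct sk_buff *skb = netdev_alloc_skb(example_dev, count);

	if (!skb)
		return;
	skb_put_data(skb, data, count);

	/* netif_rx() is callable from any context, so the separate
	 * netif_rx_ni() wrapper is no longer needed.
	 */
	netif_rx(skb);
}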
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 91230894692d..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -646,9 +646,7 @@ static inline void debugfs_init(struct cfv_info *cfv)
/* Setup CAIF for a virtio device */
static int cfv_probe(struct virtio_device *vdev)
{
- vq_callback_t *vq_cbs = cfv_release_cb;
vrh_callback_t *vrh_cbs = cfv_recv;
- const char *names = "output";
const char *cfv_netdev_name = "cfvrt";
struct net_device *netdev;
struct cfv_info *cfv;
@@ -675,9 +673,11 @@ static int cfv_probe(struct virtio_device *vdev)
goto err;
/* Get the TX virtio ring. This is a "guest side vring". */
- err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
- if (err)
+ cfv->vq_tx = virtio_find_single_vq(vdev, cfv_release_cb, "output");
+ if (IS_ERR(cfv->vq_tx)) {
+ err = PTR_ERR(cfv->vq_tx);
goto err;
+ }
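virtio_find_single_vq() spares the caller the callback and name arrays that virtio_find_vqs() expects and reports failure via ERR_PTR() rather than an int, hence the IS_ERR()/PTR_ERR() handling above. An illustrative wrapper (example_setup_tx_vq() is not part of the patch):

static int example_setup_tx_vq(struct virtio_device *vdev,
			       struct virtqueue **vq_out)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vdev, cfv_release_cb, "output");
	if (IS_ERR(vq))
		return PTR_ERR(vq);	/* error comes back as ERR_PTR() */

	*vq_out = vq;
	return 0;
}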
/* Get the CAIF configuration from virtio config space, if available */
if (vdev->config->get) {
@@ -714,20 +714,29 @@ static int cfv_probe(struct virtio_device *vdev)
/* Initialize NAPI poll context data */
vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
cfv->ctx.head = USHRT_MAX;
- netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
+ netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
+ CFV_DEFAULT_QUOTA);
tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
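The probe-ordering fix condenses to the sketch below: dev_open() also runs under the RTNL lock, so registering with register_netdevice() (the caller-holds-RTNL variant of register_netdev()) and calling virtio_device_ready() before dropping the lock guarantees that ndo_open() cannot touch the virtqueues before the device is ready:

	rtnl_lock();
	err = register_netdevice(netdev);	/* needs RTNL held */
	if (!err)
		virtio_device_ready(vdev);	/* vqs usable from here on */
	rtnl_unlock();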
@@ -736,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
@@ -754,7 +763,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
@@ -773,7 +782,6 @@ static struct virtio_driver caif_virtio_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = cfv_probe,
.remove = cfv_remove,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,5 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "CAN Device Drivers"
+
+menuconfig CAN_DEV
+ tristate "CAN Device Drivers"
+ default y
+ depends on CAN
+ help
+ Controller Area Network (CAN) is a serial communications protocol up to
+ 1Mbit/s for its original release (now known as Classical CAN) and up
+ to 8Mbit/s for the more recent CAN with Flexible Data-Rate
+ (CAN-FD). The CAN bus was originally designed mainly for automotive use,
+ but is now widely used in marine (NMEA2000), industrial, and medical
+ applications. More information on the CAN network protocol family
+ PF_CAN is contained in <Documentation/networking/can.rst>.
+
+ This section contains all the CAN(-FD) device drivers including the
+ virtual ones. If you own such devices or plan to use the virtual CAN
+ interfaces to develop applications, say Y here.
+
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
@@ -28,35 +49,22 @@ config CAN_VXCAN
This driver can also be built as a module. If so, the module
will be called vxcan.
-config CAN_SLCAN
- tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on TTY
+config CAN_NETLINK
+ bool "CAN device drivers with Netlink support"
+ default y
help
- CAN driver for several 'low cost' CAN interfaces that are attached
- via serial lines or via USB-to-serial adapters using the LAWICEL
- ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+ Enables the common framework for CAN device drivers. This is the
+ standard library and provides features for the Netlink interface such
+ as bittiming validation, support of CAN error states, device restart
+ and others.
- As only the sending and receiving of CAN frames is implemented, this
- driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+ The additional features selected by this option will be added to the
+ can-dev module.
- Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the linux-can project, see
- https://github.com/linux-can/can-utils for details.
-
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
-
-config CAN_DEV
- tristate "Platform CAN drivers with Netlink support"
- default y
- help
- Enables the common framework for platform CAN drivers with Netlink
- support. This is the standard library for CAN drivers.
- If unsure, say Y.
+ This is required by all platform and hardware CAN drivers. If you
+ plan to use such devices or if unsure, say Y.
-if CAN_DEV
+if CAN_NETLINK
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
@@ -69,36 +77,75 @@ config CAN_CALC_BITTIMING
source clock frequencies. Disabling saves some space, but then the
bit-timing parameters must be specified directly using the Netlink
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
- If unsure, say Y.
-config CAN_LEDS
- bool "Enable LED triggers for Netlink based drivers"
- depends on LEDS_CLASS
- # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
- # everything that this driver is doing. This is marked as broken
- # because it uses stuff that is intended to be changed or removed.
- # Please consider switching to the netdev trigger and confirm it
- # fulfills your needs instead of fixing this driver.
- depends on BROKEN
- select LEDS_TRIGGERS
- help
- This option adds two LED triggers for packet receive and transmit
- events on each supported CAN device.
+ The additional features selected by this option will be added to the
+ can-dev module.
+
+ If unsure, say Y.
- Say Y here if you are working on a system with led-class supported
- LEDs and you want to use them as canbus activity indicators.
+config CAN_RX_OFFLOAD
+ bool
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
+config CAN_BXCAN
+ tristate "STM32 Basic Extended CAN (bxCAN) devices"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
+ help
+ Say yes here to build support for the STMicroelectronics STM32 basic
+ extended CAN Controller (bxCAN).
+
+ This driver can also be built as a module. If so, the module
+ will be called bxcan.
+
+config CAN_CAN327
+ tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)"
+ depends on TTY
+ select CAN_RX_OFFLOAD
+ help
+ CAN driver for several 'low cost' OBD-II interfaces based on the
+ ELM327 OBD-II interpreter chip.
+
+ This is a best effort driver - the ELM327 interface was never
+ designed to be used as a standalone CAN interface. However, it can
+ still be used for simple request-response protocols (such as OBD II),
+ and to monitor broadcast messages on a bus (such as in a vehicle).
+
+ Please refer to the documentation for information on how to use it:
+ Documentation/networking/device_drivers/can/can327.rst
+
+ If this driver is built as a module, it will be called can327.
+
+config CAN_DUMMY
+ tristate "Dummy CAN"
+ help
+ A dummy CAN module supporting Classical CAN, CAN FD and CAN XL. It
+ exposes bittiming values which can be configured through the netlink
+ interface.
+
+ The module will simply echo any frame sent to it. If debug messages
+ are activated, it prints all the CAN bittiming information in the
+ kernel log. Aside from that it does nothing.
+
+ This is convenient for testing the CAN netlink interface. Most users
+ will never need this. If unsure, say NO.
+
+ To compile this driver as a module, choose M here: the module will be
+ called dummy-can.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Freescale FlexCAN.
@@ -124,30 +171,57 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
- select CRC32
- help
+ select NET_DEVLINK
+ help
This is a driver for the Kvaser PCI Express CAN FD family.
Supported devices:
Kvaser PCIEcan 4xHS
Kvaser PCIEcan 2xHS v2
Kvaser PCIEcan HS v2
+ Kvaser PCIEcan 1xCAN v3
+ Kvaser PCIEcan 2xCAN v3
+ Kvaser PCIEcan 4xCAN v2
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+ Kvaser Mini PCI Express 1xCAN v3
+ Kvaser Mini PCI Express 2xCAN v3
+ Kvaser M.2 PCIe 4xCAN
+ Kvaser PCIe 8xCAN
+
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
+ help
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty line discipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils package from the linux-can project; see
+ https://github.com/linux-can/can-utils for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called slcan.
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
- depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
+ depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Allwinner
- A10/A20 SoCs.
+ A10/A20/D1 SoCs.
To compile this driver as a module, choose M here: the module will
be called sun4i_can.
config CAN_TI_HECC
- depends on ARM
+ depends on ARM || COMPILE_TEST
tristate "TI High End CAN Controller"
+ select CAN_RX_OFFLOAD
help
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
@@ -160,27 +234,22 @@ config CAN_XILINXCAN
Xilinx CAN driver. This driver supports both soft AXI CAN IP and
Zynq CANPS IP.
-config PCH_CAN
- tristate "Intel EG20T PCH CAN controller"
- depends on PCI && (X86_32 || COMPILE_TEST)
- help
- This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
- is an IOH for x86 embedded processor (Intel Atom E6xx series).
- This driver can access CAN bus.
-
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
+source "drivers/net/can/esd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/peak_canfd/Kconfig"
source "drivers/net/can/rcar/Kconfig"
+source "drivers/net/can/rockchip/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
source "drivers/net/can/usb/Kconfig"
-endif
+endif #CAN_NETLINK
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
@@ -190,4 +259,4 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
-endmenu
+endif #CAN_DEV
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index a2b4463d8480..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -5,22 +5,28 @@
obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
-obj-$(CONFIG_CAN_SLCAN) += slcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
+obj-y += esd/
obj-y += rcar/
+obj-y += rockchip/
obj-y += spi/
obj-y += usb/
obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
+obj-$(CONFIG_CAN_BXCAN) += bxcan.o
+obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
-obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
+obj-$(CONFIG_CAN_DUMMY) += dummy_can.o
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
-obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
@@ -28,6 +34,5 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
-obj-$(CONFIG_PCH_CAN) += pch_can.o
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3aea32c9b108..c2a3a4eef5b2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -3,17 +3,20 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011, 2023 by Marc Kleine-Budde <kernel@pengutronix.de>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
@@ -23,91 +26,115 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
-#define AT91_MB_MASK(i) ((1 << (i)) - 1)
+#define AT91_MB_MASK(i) ((1 << (i)) - 1)
/* Common registers */
enum at91_reg {
- AT91_MR = 0x000,
- AT91_IER = 0x004,
- AT91_IDR = 0x008,
- AT91_IMR = 0x00C,
- AT91_SR = 0x010,
- AT91_BR = 0x014,
- AT91_TIM = 0x018,
- AT91_TIMESTP = 0x01C,
- AT91_ECR = 0x020,
- AT91_TCR = 0x024,
- AT91_ACR = 0x028,
+ AT91_MR = 0x000,
+ AT91_IER = 0x004,
+ AT91_IDR = 0x008,
+ AT91_IMR = 0x00C,
+ AT91_SR = 0x010,
+ AT91_BR = 0x014,
+ AT91_TIM = 0x018,
+ AT91_TIMESTP = 0x01C,
+ AT91_ECR = 0x020,
+ AT91_TCR = 0x024,
+ AT91_ACR = 0x028,
};
/* Mailbox registers (0 <= i <= 15) */
-#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20)))
-#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20)))
-#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20)))
-#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20)))
-#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20)))
-#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20)))
-#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20)))
-#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20)))
+#define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20)))
+#define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20)))
+#define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20)))
+#define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20)))
+#define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20)))
+#define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20)))
+#define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20)))
+#define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20)))
/* Register bits */
-#define AT91_MR_CANEN BIT(0)
-#define AT91_MR_LPM BIT(1)
-#define AT91_MR_ABM BIT(2)
-#define AT91_MR_OVL BIT(3)
-#define AT91_MR_TEOF BIT(4)
-#define AT91_MR_TTM BIT(5)
-#define AT91_MR_TIMFRZ BIT(6)
-#define AT91_MR_DRPT BIT(7)
-
-#define AT91_SR_RBSY BIT(29)
-
-#define AT91_MMR_PRIO_SHIFT (16)
-
-#define AT91_MID_MIDE BIT(29)
-
-#define AT91_MSR_MRTR BIT(20)
-#define AT91_MSR_MABT BIT(22)
-#define AT91_MSR_MRDY BIT(23)
-#define AT91_MSR_MMI BIT(24)
-
-#define AT91_MCR_MRTR BIT(20)
-#define AT91_MCR_MTCR BIT(23)
+#define AT91_MR_CANEN BIT(0)
+#define AT91_MR_LPM BIT(1)
+#define AT91_MR_ABM BIT(2)
+#define AT91_MR_OVL BIT(3)
+#define AT91_MR_TEOF BIT(4)
+#define AT91_MR_TTM BIT(5)
+#define AT91_MR_TIMFRZ BIT(6)
+#define AT91_MR_DRPT BIT(7)
+
+#define AT91_SR_RBSY BIT(29)
+#define AT91_SR_TBSY BIT(30)
+#define AT91_SR_OVLSY BIT(31)
+
+#define AT91_BR_PHASE2_MASK GENMASK(2, 0)
+#define AT91_BR_PHASE1_MASK GENMASK(6, 4)
+#define AT91_BR_PROPAG_MASK GENMASK(10, 8)
+#define AT91_BR_SJW_MASK GENMASK(13, 12)
+#define AT91_BR_BRP_MASK GENMASK(22, 16)
+#define AT91_BR_SMP BIT(24)
+
+#define AT91_TIM_TIMER_MASK GENMASK(15, 0)
+
+#define AT91_ECR_REC_MASK GENMASK(8, 0)
+#define AT91_ECR_TEC_MASK GENMASK(23, 16)
+
+#define AT91_TCR_TIMRST BIT(31)
+
+#define AT91_MMR_MTIMEMARK_MASK GENMASK(15, 0)
+#define AT91_MMR_PRIOR_MASK GENMASK(19, 16)
+#define AT91_MMR_MOT_MASK GENMASK(26, 24)
+
+#define AT91_MID_MIDVB_MASK GENMASK(17, 0)
+#define AT91_MID_MIDVA_MASK GENMASK(28, 18)
+#define AT91_MID_MIDE BIT(29)
+
+#define AT91_MSR_MTIMESTAMP_MASK GENMASK(15, 0)
+#define AT91_MSR_MDLC_MASK GENMASK(19, 16)
+#define AT91_MSR_MRTR BIT(20)
+#define AT91_MSR_MABT BIT(22)
+#define AT91_MSR_MRDY BIT(23)
+#define AT91_MSR_MMI BIT(24)
+
+#define AT91_MCR_MDLC_MASK GENMASK(19, 16)
+#define AT91_MCR_MRTR BIT(20)
+#define AT91_MCR_MACR BIT(22)
+#define AT91_MCR_MTCR BIT(23)
/* Mailbox Modes */
enum at91_mb_mode {
- AT91_MB_MODE_DISABLED = 0,
- AT91_MB_MODE_RX = 1,
- AT91_MB_MODE_RX_OVRWR = 2,
- AT91_MB_MODE_TX = 3,
- AT91_MB_MODE_CONSUMER = 4,
- AT91_MB_MODE_PRODUCER = 5,
+ AT91_MB_MODE_DISABLED = 0,
+ AT91_MB_MODE_RX = 1,
+ AT91_MB_MODE_RX_OVRWR = 2,
+ AT91_MB_MODE_TX = 3,
+ AT91_MB_MODE_CONSUMER = 4,
+ AT91_MB_MODE_PRODUCER = 5,
};
/* Interrupt mask bits */
-#define AT91_IRQ_ERRA BIT(16)
-#define AT91_IRQ_WARN BIT(17)
-#define AT91_IRQ_ERRP BIT(18)
-#define AT91_IRQ_BOFF BIT(19)
-#define AT91_IRQ_SLEEP BIT(20)
-#define AT91_IRQ_WAKEUP BIT(21)
-#define AT91_IRQ_TOVF BIT(22)
-#define AT91_IRQ_TSTP BIT(23)
-#define AT91_IRQ_CERR BIT(24)
-#define AT91_IRQ_SERR BIT(25)
-#define AT91_IRQ_AERR BIT(26)
-#define AT91_IRQ_FERR BIT(27)
-#define AT91_IRQ_BERR BIT(28)
-
-#define AT91_IRQ_ERR_ALL (0x1fff0000)
-#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
- AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
-#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
- AT91_IRQ_ERRP | AT91_IRQ_BOFF)
-
-#define AT91_IRQ_ALL (0x1fffffff)
+#define AT91_IRQ_ERRA BIT(16)
+#define AT91_IRQ_WARN BIT(17)
+#define AT91_IRQ_ERRP BIT(18)
+#define AT91_IRQ_BOFF BIT(19)
+#define AT91_IRQ_SLEEP BIT(20)
+#define AT91_IRQ_WAKEUP BIT(21)
+#define AT91_IRQ_TOVF BIT(22)
+#define AT91_IRQ_TSTP BIT(23)
+#define AT91_IRQ_CERR BIT(24)
+#define AT91_IRQ_SERR BIT(25)
+#define AT91_IRQ_AERR BIT(26)
+#define AT91_IRQ_FERR BIT(27)
+#define AT91_IRQ_BERR BIT(28)
+
+#define AT91_IRQ_ERR_ALL (0x1fff0000)
+#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
+ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
+#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
+ AT91_IRQ_ERRP | AT91_IRQ_BOFF)
+
+#define AT91_IRQ_ALL (0x1fffffff)
enum at91_devtype {
AT91_DEVTYPE_SAM9263,
@@ -116,7 +143,6 @@ enum at91_devtype {
struct at91_devtype_data {
unsigned int rx_first;
- unsigned int rx_split;
unsigned int rx_last;
unsigned int tx_shift;
enum at91_devtype type;
@@ -124,14 +150,13 @@ struct at91_devtype_data {
struct at91_priv {
struct can_priv can; /* must be the first member! */
- struct napi_struct napi;
+ struct can_rx_offload offload;
+ struct phy *transceiver;
void __iomem *reg_base;
- u32 reg_sr;
- unsigned int tx_next;
- unsigned int tx_echo;
- unsigned int rx_next;
+ unsigned int tx_head;
+ unsigned int tx_tail;
struct at91_devtype_data devtype_data;
struct clk *clk;
@@ -140,9 +165,13 @@ struct at91_priv {
canid_t mb0_id;
};
+static inline struct at91_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct at91_priv, offload);
+}
+
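rx_offload_to_priv() is the usual container_of() idiom: step back from the embedded member to the enclosing structure. A self-contained userspace sketch of the mechanism (the macro is re-implemented just for the demo; nothing at91-specific):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct offload { int queue; };
struct priv { int state; struct offload offload; };

int main(void)
{
	struct priv p;

	/* subtract the member offset to recover the enclosing struct */
	assert(container_of(&p.offload, struct priv, offload) == &p);
	return 0;
}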
static const struct at91_devtype_data at91_at91sam9263_data = {
.rx_first = 1,
- .rx_split = 8,
.rx_last = 11,
.tx_shift = 2,
.type = AT91_DEVTYPE_SAM9263,
@@ -150,7 +179,6 @@ static const struct at91_devtype_data at91_at91sam9263_data = {
static const struct at91_devtype_data at91_at91sam9x5_data = {
.rx_first = 0,
- .rx_split = 4,
.rx_last = 5,
.tx_shift = 1,
.type = AT91_DEVTYPE_SAM9X5,
@@ -187,27 +215,6 @@ static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
return priv->devtype_data.rx_last;
}
-static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
-{
- return priv->devtype_data.rx_split;
-}
-
-static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
-{
- return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
-}
-
-static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
-{
- return get_mb_rx_split(priv) - 1;
-}
-
-static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
-{
- return AT91_MB_MASK(get_mb_rx_split(priv)) &
- ~AT91_MB_MASK(get_mb_rx_first(priv));
-}
-
static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
return priv->devtype_data.tx_shift;
@@ -228,24 +235,24 @@ static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}
-static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+static inline unsigned int get_head_prio_shift(const struct at91_priv *priv)
{
return get_mb_tx_shift(priv);
}
-static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_prio_mask(const struct at91_priv *priv)
{
return 0xf << get_mb_tx_shift(priv);
}
-static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_mb_mask(const struct at91_priv *priv)
{
return AT91_MB_MASK(get_mb_tx_shift(priv));
}
-static inline unsigned int get_next_mask(const struct at91_priv *priv)
+static inline unsigned int get_head_mask(const struct at91_priv *priv)
{
- return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+ return get_head_mb_mask(priv) | get_head_prio_mask(priv);
}
static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
@@ -260,19 +267,19 @@ static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
~AT91_MB_MASK(get_mb_tx_first(priv));
}
-static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
+static inline unsigned int get_tx_head_mb(const struct at91_priv *priv)
{
- return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+ return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}
-static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
+static inline unsigned int get_tx_head_prio(const struct at91_priv *priv)
{
- return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
+ return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf;
}
-static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
+static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv)
{
- return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+ return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -288,9 +295,12 @@ static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
static inline void set_mb_mode_prio(const struct at91_priv *priv,
unsigned int mb, enum at91_mb_mode mode,
- int prio)
+ u8 prio)
{
- at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
+ const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) |
+ FIELD_PREP(AT91_MMR_PRIOR_MASK, prio);
+
+ at91_write(priv, AT91_MMR(mb), reg_mmr);
}
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
@@ -304,9 +314,10 @@ static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
u32 reg_mid;
if (can_id & CAN_EFF_FLAG)
- reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) |
+ AT91_MID_MIDE;
else
- reg_mid = (can_id & CAN_SFF_MASK) << 18;
+ reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id);
return reg_mid;
}
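The MID conversion is pure notation. A self-contained userspace check (GENMASK()/FIELD_PREP() re-implemented just for the demo; these are not the kernel macros) that the new form equals the removed open-coded shift for a standard-frame ID:

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define AT91_MID_MIDVA_MASK GENMASK(28, 18)	/* 11 bit standard ID */

int main(void)
{
	uint32_t can_id = 0x123;

	/* FIELD_PREP() shifts the value up to the mask's lowest set bit,
	 * exactly the removed "(can_id & CAN_SFF_MASK) << 18".
	 */
	assert(FIELD_PREP(AT91_MID_MIDVA_MASK, can_id) == can_id << 18);
	return 0;
}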
@@ -318,8 +329,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
u32 reg_mid;
/* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
- * mailbox is disabled. The next 11 mailboxes are used as a
- * reception FIFO. The last mailbox is configured with
+ * mailbox is disabled. The next mailboxes are used as a
+ * reception FIFO. The last of the RX mailboxes is configured with
* overwrite option. The overwrite flag indicates a FIFO
* overflow.
*/
@@ -340,27 +351,30 @@ static void at91_setup_mailboxes(struct net_device *dev)
at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
}
- /* The last 4 mailboxes are used for transmitting. */
+ /* The last mailboxes are used for transmitting. */
for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
- /* Reset tx and rx helper pointers */
- priv->tx_next = priv->tx_echo = 0;
- priv->rx_next = get_mb_rx_first(priv);
+ /* Reset tx helper pointers */
+ priv->tx_head = priv->tx_tail = 0;
}
static int at91_set_bittiming(struct net_device *dev)
{
const struct at91_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
- u32 reg_br;
+ u32 reg_br = 0;
- reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
- ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
- ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
- ((bt->phase_seg2 - 1) << 0);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ reg_br |= AT91_BR_SMP;
- netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
+ reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) |
+ FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) |
+ FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) |
+ FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
@@ -373,8 +387,8 @@ static int at91_get_berr_counter(const struct net_device *dev,
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_ecr = at91_read(priv, AT91_ECR);
- bec->rxerr = reg_ecr & 0xff;
- bec->txerr = reg_ecr >> 16;
+ bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr);
+ bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr);
return 0;
}
@@ -403,9 +417,13 @@ static void at91_chip_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* Dummy read to clear latched line error interrupts on
+ * sam9x5 and newer SoCs.
+ */
+ at91_read(priv, AT91_SR);
+
/* Enable interrupts */
- reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
- at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+ reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME;
at91_write(priv, AT91_IER, reg_ier);
}
@@ -414,6 +432,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
struct at91_priv *priv = netdev_priv(dev);
u32 reg_mr;
+ /* Abort any pending TX requests. However this doesn't seem to
+ * work in case of bus-off on sama5d3.
+ */
+ at91_write(priv, AT91_ACR, get_irq_mb_tx(priv));
+
/* disable interrupts */
at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
@@ -437,27 +460,26 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
* stop sending, waiting for all messages to be delivered, then start
* again with mailbox AT91_MB_TX_FIRST prio 0.
*
- * We use the priv->tx_next as counter for the next transmission
+ * We use the priv->tx_head as counter for the next transmission
* mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
* encode the mailbox number, the upper 4 bits the mailbox priority:
*
- * priv->tx_next = (prio << get_next_prio_shift(priv)) |
+ * priv->tx_head = (prio << get_head_prio_shift(priv)) |
* (mb - get_mb_tx_first(priv));
*
*/
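A self-contained userspace model of that encoding, assuming the sam9263 layout (tx_shift = 2, i.e. four TX mailboxes with the 4 bit priority above them):

#include <assert.h>

#define TX_SHIFT   2				/* sam9263: 4 TX mailboxes */
#define MB_MASK	   ((1u << TX_SHIFT) - 1)	/* low bits: mailbox */
#define PRIO_MASK  (0xfu << TX_SHIFT)		/* next 4 bits: priority */

int main(void)
{
	unsigned int tx_head;

	for (tx_head = 1; tx_head <= 0x40; tx_head++) {
		/* the priority bumps each time the mailboxes wrap */
		assert(((tx_head & PRIO_MASK) >> TX_SHIFT) ==
		       (tx_head / 4) % 16);
		/* prio+mb reaching all-zero again (tx_head = 0x40) is
		 * the full wrap that forces the TX queue to stop
		 */
		if (!(tx_head & (MB_MASK | PRIO_MASK)))
			assert(tx_head == 0x40);
	}
	return 0;
}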
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- mb = get_tx_next_mb(priv);
- prio = get_tx_next_prio(priv);
+ mb = get_tx_head_mb(priv);
+ prio = get_tx_head_prio(priv);
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
@@ -466,8 +488,12 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
reg_mid = at91_can_id_to_reg_mid(cf->can_id);
- reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
- (cf->len << 16) | AT91_MCR_MTCR;
+
+ reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) |
+ AT91_MCR_MTCR;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ reg_mcr |= AT91_MCR_MRTR;
/* disable MB while writing ID (see datasheet) */
set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
@@ -480,22 +506,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This triggers transmission */
at91_write(priv, AT91_MCR(mb), reg_mcr);
- stats->tx_bytes += cf->len;
-
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
/* we have to stop the queue and deliver all messages in case
* of a prio+mb counter wrap around. This is the case if
- * tx_next buffer prio and mailbox equals 0.
+ * tx_head buffer prio and mailbox equals 0.
*
* also stop the queue if next buffer is still in use
* (== not ready)
*/
- priv->tx_next++;
- if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+ priv->tx_head++;
+ if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) &
AT91_MSR_MRDY) ||
- (priv->tx_next & get_next_mask(priv)) == 0)
+ (priv->tx_head & get_head_mask(priv)) == 0)
netif_stop_queue(dev);
/* Enable interrupt for this mailbox */
@@ -504,32 +528,20 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/**
- * at91_activate_rx_low - activate lower rx mailboxes
- * @priv: a91 context
- *
- * Reenables the lower mailboxes for reception of new CAN messages
- */
-static inline void at91_activate_rx_low(const struct at91_priv *priv)
+static inline u32 at91_get_timestamp(const struct at91_priv *priv)
{
- u32 mask = get_mb_rx_low_mask(priv);
-
- at91_write(priv, AT91_TCR, mask);
+ return at91_read(priv, AT91_TIM);
}
-/**
- * at91_activate_rx_mb - reactive single rx mailbox
- * @priv: a91 context
- * @mb: mailbox to reactivate
- *
- * Reenables given mailbox for reception of new CAN messages
- */
-static inline void at91_activate_rx_mb(const struct at91_priv *priv,
- unsigned int mb)
+static inline struct sk_buff *
+at91_alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf, u32 *timestamp)
{
- u32 mask = 1 << mb;
+ const struct at91_priv *priv = netdev_priv(dev);
- at91_write(priv, AT91_TCR, mask);
+ *timestamp = at91_get_timestamp(priv);
+
+ return alloc_can_err_skb(dev, cf);
}
/**
@@ -540,47 +552,71 @@ static void at91_rx_overflow_err(struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
+ struct at91_priv *priv = netdev_priv(dev);
struct can_frame *cf;
+ u32 timestamp;
+ int err;
netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
- skb = alloc_can_err_skb(dev, &cf);
+ skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
if (unlikely(!skb))
return;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
}
/**
- * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
- * @dev: net device
+ * at91_mailbox_read - read CAN msg from mailbox
+ * @offload: rx-offload
* @mb: mailbox number to read from
- * @cf: can frame where to store message
+ * @timestamp: pointer to 32 bit timestamp
+ * @drop: if true, mark the mailbox as read and drop the frame
*
- * Reads a CAN message from the given mailbox and stores data into
- * given can frame. "mb" and "cf" must be valid.
+ * Reads a CAN message from the given mailbox if not empty.
*/
-static void at91_read_mb(struct net_device *dev, unsigned int mb,
- struct can_frame *cf)
+static struct sk_buff *at91_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop)
{
- const struct at91_priv *priv = netdev_priv(dev);
+ const struct at91_priv *priv = rx_offload_to_priv(offload);
+ struct can_frame *cf;
+ struct sk_buff *skb;
u32 reg_msr, reg_mid;
+ reg_msr = at91_read(priv, AT91_MSR(mb));
+ if (!(reg_msr & AT91_MSR_MRDY))
+ return NULL;
+
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
reg_mid = at91_read(priv, AT91_MID(mb));
if (reg_mid & AT91_MID_MIDE)
- cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) |
+ CAN_EFF_FLAG;
else
- cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid);
- reg_msr = at91_read(priv, AT91_MSR(mb));
- cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);
+ /* extend timestamp to full 32 bit */
+ *timestamp = FIELD_GET(AT91_MSR_MTIMESTAMP_MASK, reg_msr) << 16;
+
+ cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr));
if (reg_msr & AT91_MSR_MRTR) {
cf->can_id |= CAN_RTR_FLAG;
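Why the 16 bit hardware timestamp goes into the upper half of the 32 bit value: rx-offload is assumed to order queued skbs with a signed wraparound compare, so the hardware counter must occupy the most significant bits for that compare to wrap at the counter's real period. A self-contained userspace check:

#include <assert.h>
#include <stdint.h>

/* wraparound-safe "a happened before b" */
static int before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint16_t t1 = 0xfffe, t2 = 0x0001;	/* counter wrapped between */

	/* shifted into the MSBs, ordering survives the 16 bit wrap */
	assert(before((uint32_t)t1 << 16, (uint32_t)t2 << 16));
	return 0;
}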
@@ -593,236 +629,21 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
- at91_rx_overflow_err(dev);
-}
-
-/**
- * at91_read_msg - read CAN message from mailbox
- * @dev: net device
- * @mb: mail box to read from
- *
- * Reads a CAN message from given mailbox, and put into linux network
- * RX queue, does all housekeeping chores (stats, ...)
- */
-static void at91_read_msg(struct net_device *dev, unsigned int mb)
-{
- struct net_device_stats *stats = &dev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
-
- skb = alloc_can_skb(dev, &cf);
- if (unlikely(!skb)) {
- stats->rx_dropped++;
- return;
- }
-
- at91_read_mb(dev, mb, cf);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
-}
-
-/**
- * at91_poll_rx - read multiple CAN messages from mailboxes
- * @dev: net device
- * @quota: max number of pkgs we're allowed to receive
- *
- * Theory of Operation:
- *
- * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
- * on the chip are reserved for RX. We split them into 2 groups. The
- * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
- *
- * Like it or not, but the chip always saves a received CAN message
- * into the first free mailbox it finds (starting with the
- * lowest). This makes it very difficult to read the messages in the
- * right order from the chip. This is how we work around that problem:
- *
- * The first message goes into mb nr. 1 and issues an interrupt. All
- * rx ints are disabled in the interrupt handler and a napi poll is
- * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
- * receive another message).
- *
- * lower mbxs upper
- * ____^______ __^__
- * / \ / \
- * +-+-+-+-+-+-+-+-++-+-+-+-+
- * | |x|x|x|x|x|x|x|| | | | |
- * +-+-+-+-+-+-+-+-++-+-+-+-+
- * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
- * 0 1 2 3 4 5 6 7 8 9 0 1 / box
- * ^
- * |
- * \
- * unused, due to chip bug
- *
- * The variable priv->rx_next points to the next mailbox to read a
- * message from. As long we're in the lower mailboxes we just read the
- * mailbox but not re-enable it.
- *
- * With completion of the last of the lower mailboxes, we re-enable the
- * whole first group, but continue to look for filled mailboxes in the
- * upper mailboxes. Imagine the second group like overflow mailboxes,
- * which takes CAN messages if the lower goup is full. While in the
- * upper group we re-enable the mailbox right after reading it. Giving
- * the chip more room to store messages.
- *
- * After finishing we look again in the lower group if we've still
- * quota.
- *
- */
-static int at91_poll_rx(struct net_device *dev, int quota)
-{
- struct at91_priv *priv = netdev_priv(dev);
- u32 reg_sr = at91_read(priv, AT91_SR);
- const unsigned long *addr = (unsigned long *)&reg_sr;
- unsigned int mb;
- int received = 0;
-
- if (priv->rx_next > get_mb_rx_low_last(priv) &&
- reg_sr & get_mb_rx_low_mask(priv))
- netdev_info(dev,
- "order of incoming frames cannot be guaranteed\n");
-
- again:
- for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
- mb < get_mb_tx_first(priv) && quota > 0;
- reg_sr = at91_read(priv, AT91_SR),
- mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
- at91_read_msg(dev, mb);
-
- /* reactivate mailboxes */
- if (mb == get_mb_rx_low_last(priv))
- /* all lower mailboxed, if just finished it */
- at91_activate_rx_low(priv);
- else if (mb > get_mb_rx_low_last(priv))
- /* only the mailbox we read */
- at91_activate_rx_mb(priv, mb);
-
- received++;
- quota--;
- }
-
- /* upper group completed, look again in lower */
- if (priv->rx_next > get_mb_rx_low_last(priv) &&
- mb > get_mb_rx_last(priv)) {
- priv->rx_next = get_mb_rx_first(priv);
- if (quota > 0)
- goto again;
- }
-
- return received;
-}
-
-static void at91_poll_err_frame(struct net_device *dev,
- struct can_frame *cf, u32 reg_sr)
-{
- struct at91_priv *priv = netdev_priv(dev);
-
- /* CRC error */
- if (reg_sr & AT91_IRQ_CERR) {
- netdev_dbg(dev, "CERR irq\n");
- dev->stats.rx_errors++;
- priv->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- }
-
- /* Stuffing Error */
- if (reg_sr & AT91_IRQ_SERR) {
- netdev_dbg(dev, "SERR irq\n");
- dev->stats.rx_errors++;
- priv->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- }
-
- /* Acknowledgement Error */
- if (reg_sr & AT91_IRQ_AERR) {
- netdev_dbg(dev, "AERR irq\n");
- dev->stats.tx_errors++;
- cf->can_id |= CAN_ERR_ACK;
- }
-
- /* Form error */
- if (reg_sr & AT91_IRQ_FERR) {
- netdev_dbg(dev, "FERR irq\n");
- dev->stats.rx_errors++;
- priv->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_FORM;
- }
-
- /* Bit Error */
- if (reg_sr & AT91_IRQ_BERR) {
- netdev_dbg(dev, "BERR irq\n");
- dev->stats.tx_errors++;
- priv->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->data[2] |= CAN_ERR_PROT_BIT;
- }
-}
-
-static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
-{
- struct sk_buff *skb;
- struct can_frame *cf;
-
- if (quota == 0)
- return 0;
-
- skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
-
- at91_poll_err_frame(dev, cf, reg_sr);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
- netif_receive_skb(skb);
-
- return 1;
-}
-
-static int at91_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *dev = napi->dev;
- const struct at91_priv *priv = netdev_priv(dev);
- u32 reg_sr = at91_read(priv, AT91_SR);
- int work_done = 0;
+ at91_rx_overflow_err(offload->dev);
- if (reg_sr & get_irq_mb_rx(priv))
- work_done += at91_poll_rx(dev, quota - work_done);
+ mark_as_read:
+ at91_write(priv, AT91_MCR(mb), AT91_MCR_MTCR);
- /* The error bits are clear on read,
- * so use saved value from irq handler.
- */
- reg_sr |= priv->reg_sr;
- if (reg_sr & AT91_IRQ_ERR_FRAME)
- work_done += at91_poll_err(dev, quota - work_done, reg_sr);
-
- if (work_done < quota) {
- /* enable IRQs for frame errors and all mailboxes >= rx_next */
- u32 reg_ier = AT91_IRQ_ERR_FRAME;
-
- reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
-
- napi_complete_done(napi, work_done);
- at91_write(priv, AT91_IER, reg_ier);
- }
-
- return work_done;
+ return skb;
}
/* theory of operation:
*
- * priv->tx_echo holds the number of the oldest can_frame put for
+ * priv->tx_tail holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
* complete IRQ.
*
- * We iterate from priv->tx_echo to priv->tx_next and check if the
+ * We iterate from priv->tx_tail to priv->tx_head and check if the
* packet has been transmitted, echo it back to the CAN framework. If
* we discover a not yet transmitted packet, stop looking for more.
*
@@ -833,10 +654,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
u32 reg_msr;
unsigned int mb;
- /* masking of reg_sr not needed, already done by at91_irq */
-
- for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
- mb = get_tx_echo_mb(priv);
+ for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) {
+ mb = get_tx_tail_mb(priv);
/* no event in mailbox? */
if (!(reg_sr & (1 << mb)))
@@ -851,236 +670,202 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
* parked in the echo queue.
*/
reg_msr = at91_read(priv, AT91_MSR(mb));
- if (likely(reg_msr & AT91_MSR_MRDY &&
- ~reg_msr & AT91_MSR_MABT)) {
- /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ if (unlikely(!(reg_msr & AT91_MSR_MRDY &&
+ ~reg_msr & AT91_MSR_MABT)))
+ continue;
+
+ /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ dev->stats.tx_bytes +=
can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
- dev->stats.tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
- }
+ dev->stats.tx_packets++;
}
/* restart queue if we don't have a wrap around but restart if
* we get a TX int for the last can frame directly before a
* wrap around.
*/
- if ((priv->tx_next & get_next_mask(priv)) != 0 ||
- (priv->tx_echo & get_next_mask(priv)) == 0)
+ if ((priv->tx_head & get_head_mask(priv)) != 0 ||
+ (priv->tx_tail & get_head_mask(priv)) == 0)
netif_wake_queue(dev);
}
-static void at91_irq_err_state(struct net_device *dev,
- struct can_frame *cf, enum can_state new_state)
+static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr)
{
+ struct net_device_stats *stats = &dev->stats;
+ enum can_state new_state, rx_state, tx_state;
struct at91_priv *priv = netdev_priv(dev);
- u32 reg_idr = 0, reg_ier = 0;
struct can_berr_counter bec;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 timestamp;
+ int err;
at91_get_berr_counter(dev, &bec);
+ can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state);
- switch (priv->can.state) {
- case CAN_STATE_ERROR_ACTIVE:
- /* from: ERROR_ACTIVE
- * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
- * => : there was a warning int
- */
- if (new_state >= CAN_STATE_ERROR_WARNING &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Warning IRQ\n");
- priv->can.can_stats.error_warning++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- }
- fallthrough;
- case CAN_STATE_ERROR_WARNING:
- /* from: ERROR_ACTIVE, ERROR_WARNING
- * to : ERROR_PASSIVE, BUS_OFF
- * => : error passive int
- */
- if (new_state >= CAN_STATE_ERROR_PASSIVE &&
- new_state <= CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "Error Passive IRQ\n");
- priv->can.can_stats.error_passive++;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (bec.txerr > bec.rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- break;
- case CAN_STATE_BUS_OFF:
- /* from: BUS_OFF
- * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
- */
- if (new_state <= CAN_STATE_ERROR_PASSIVE) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ /* The chip automatically recovers from bus-off after 128
+ * occurrences of 11 consecutive recessive bits.
+ *
+ * After an auto-recovered bus-off, the error counters no
+ * longer reflect this fact. On the sam9263 the state bits in
+ * the SR register show the current state (based on the
+ * current error counters), while on sam9x5 and newer SoCs
+ * these bits are latched.
+ *
+ * Take any latched bus-off information from the SR register
+ * into account when calculating the CAN new state, to start
+ * the standard CAN bus off handling.
+ */
+ if (reg_sr & AT91_IRQ_BOFF)
+ rx_state = CAN_STATE_BUS_OFF;
- netdev_dbg(dev, "restarted\n");
- priv->can.can_stats.restarts++;
+ new_state = max(tx_state, rx_state);
- netif_carrier_on(dev);
- netif_wake_queue(dev);
- }
- break;
- default:
- break;
- }
+ /* state hasn't changed */
+ if (likely(new_state == priv->can.state))
+ return;
- /* process state changes depending on the new state */
- switch (new_state) {
- case CAN_STATE_ERROR_ACTIVE:
- /* actually we want to enable AT91_IRQ_WARN here, but
- * it screws up the system under certain
- * circumstances. so just enable AT91_IRQ_ERRP, thus
- * the "fallthrough"
- */
- netdev_dbg(dev, "Error Active\n");
- cf->can_id |= CAN_ERR_PROT;
- cf->data[2] = CAN_ERR_PROT_ACTIVE;
- fallthrough;
- case CAN_STATE_ERROR_WARNING:
- reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
- reg_ier = AT91_IRQ_ERRP;
- break;
- case CAN_STATE_ERROR_PASSIVE:
- reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
- reg_ier = AT91_IRQ_BOFF;
- break;
- case CAN_STATE_BUS_OFF:
- reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
- AT91_IRQ_WARN | AT91_IRQ_BOFF;
- reg_ier = 0;
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
+ can_change_state(dev, cf, tx_state, rx_state);
- cf->can_id |= CAN_ERR_BUSOFF;
+ if (new_state == CAN_STATE_BUS_OFF) {
+ at91_chip_stop(dev, CAN_STATE_BUS_OFF);
+ can_bus_off(dev);
+ }
- netdev_dbg(dev, "bus-off\n");
- netif_carrier_off(dev);
- priv->can.can_stats.bus_off++;
+ if (unlikely(!skb))
+ return;
- /* turn off chip, if restart is disabled */
- if (!priv->can.restart_ms) {
- at91_chip_stop(dev, CAN_STATE_BUS_OFF);
- return;
- }
- break;
- default:
- break;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
}
- at91_write(priv, AT91_IDR, reg_idr);
- at91_write(priv, AT91_IER, reg_ier);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
}
-static int at91_get_state_by_bec(const struct net_device *dev,
- enum can_state *state)
+static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr)
{
- struct can_berr_counter bec;
+ struct net_device_stats *stats = &dev->stats;
+ struct at91_priv *priv = netdev_priv(dev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 timestamp;
int err;
- err = at91_get_berr_counter(dev, &bec);
- if (err)
- return err;
+ priv->can.can_stats.bus_error++;
- if (bec.txerr < 96 && bec.rxerr < 96)
- *state = CAN_STATE_ERROR_ACTIVE;
- else if (bec.txerr < 128 && bec.rxerr < 128)
- *state = CAN_STATE_ERROR_WARNING;
- else if (bec.txerr < 256 && bec.rxerr < 256)
- *state = CAN_STATE_ERROR_PASSIVE;
- else
- *state = CAN_STATE_BUS_OFF;
+ skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- return 0;
-}
+ if (reg_sr & AT91_IRQ_CERR) {
+ netdev_dbg(dev, "CRC error\n");
-static void at91_irq_err(struct net_device *dev)
-{
- struct at91_priv *priv = netdev_priv(dev);
- struct sk_buff *skb;
- struct can_frame *cf;
- enum can_state new_state;
- u32 reg_sr;
- int err;
+ stats->rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ if (reg_sr & AT91_IRQ_SERR) {
+ netdev_dbg(dev, "Stuff error\n");
- if (at91_is_sam9263(priv)) {
- reg_sr = at91_read(priv, AT91_SR);
-
- /* we need to look at the unmasked reg_sr */
- if (unlikely(reg_sr & AT91_IRQ_BOFF)) {
- new_state = CAN_STATE_BUS_OFF;
- } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) {
- new_state = CAN_STATE_ERROR_PASSIVE;
- } else if (unlikely(reg_sr & AT91_IRQ_WARN)) {
- new_state = CAN_STATE_ERROR_WARNING;
- } else if (likely(reg_sr & AT91_IRQ_ERRA)) {
- new_state = CAN_STATE_ERROR_ACTIVE;
- } else {
- netdev_err(dev, "BUG! hardware in undefined state\n");
- return;
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ if (reg_sr & AT91_IRQ_AERR) {
+ netdev_dbg(dev, "NACK error\n");
+
+ stats->tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
}
- } else {
- err = at91_get_state_by_bec(dev, &new_state);
- if (err)
- return;
}
- /* state hasn't changed */
- if (likely(new_state == priv->can.state))
- return;
+ if (reg_sr & AT91_IRQ_FERR) {
+ netdev_dbg(dev, "Format error\n");
- skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ if (reg_sr & AT91_IRQ_BERR) {
+ netdev_dbg(dev, "Bit error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
+ }
+
+ if (!cf)
return;
- at91_irq_err_state(dev, cf, new_state);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+}
+
+static u32 at91_get_reg_sr_rx(const struct at91_priv *priv, u32 *reg_sr_p)
+{
+ const u32 reg_sr = at91_read(priv, AT91_SR);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
- netif_rx(skb);
+ *reg_sr_p |= reg_sr;
- priv->can.state = new_state;
+ return reg_sr & get_irq_mb_rx(priv);
}
-/* interrupt handler
- */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct at91_priv *priv = netdev_priv(dev);
irqreturn_t handled = IRQ_NONE;
- u32 reg_sr, reg_imr;
-
- reg_sr = at91_read(priv, AT91_SR);
- reg_imr = at91_read(priv, AT91_IMR);
-
- /* Ignore masked interrupts */
- reg_sr &= reg_imr;
- if (!reg_sr)
- goto exit;
+ u32 reg_sr = 0, reg_sr_rx;
+ int ret;
- handled = IRQ_HANDLED;
+ /* Receive interrupt
+ * Some bits of AT91_SR are cleared on read, keep them in reg_sr.
+ */
+ while ((reg_sr_rx = at91_get_reg_sr_rx(priv, &reg_sr))) {
+ ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
+ reg_sr_rx);
+ handled = IRQ_HANDLED;
- /* Receive or error interrupt? -> napi */
- if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
- /* The error bits are clear on read,
- * save for later use.
- */
- priv->reg_sr = reg_sr;
- at91_write(priv, AT91_IDR,
- get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
- napi_schedule(&priv->napi);
+ if (!ret)
+ break;
}
/* Transmission complete interrupt */
- if (reg_sr & get_irq_mb_tx(priv))
+ if (reg_sr & get_irq_mb_tx(priv)) {
at91_irq_tx(dev, reg_sr);
+ handled = IRQ_HANDLED;
+ }
- at91_irq_err(dev);
+ /* Line Error interrupt */
+ if (reg_sr & AT91_IRQ_ERR_LINE ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ at91_irq_err_line(dev, reg_sr);
+ handled = IRQ_HANDLED;
+ }
+
+ /* Frame Error Interrupt */
+ if (reg_sr & AT91_IRQ_ERR_FRAME) {
+ at91_irq_err_frame(dev, reg_sr);
+ handled = IRQ_HANDLED;
+ }
+
+ if (handled)
+ can_rx_offload_irq_finish(&priv->offload);
- exit:
return handled;
}
@@ -1089,35 +874,38 @@ static int at91_open(struct net_device *dev)
struct at91_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = phy_power_on(priv->transceiver);
if (err)
return err;
/* check or determine and set bittime */
err = open_candev(dev);
if (err)
- goto out;
+ goto out_phy_power_off;
- /* register interrupt handler */
- if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
- dev->name, dev)) {
- err = -EAGAIN;
- goto out_close;
- }
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto out_close_candev;
- can_led_event(dev, CAN_LED_EVENT_OPEN);
+ /* register interrupt handler */
+ err = request_irq(dev->irq, at91_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err)
+ goto out_clock_disable_unprepare;
/* start chip and queuing */
at91_chip_start(dev);
- napi_enable(&priv->napi);
+ can_rx_offload_enable(&priv->offload);
netif_start_queue(dev);
return 0;
- out_close:
- close_candev(dev);
- out:
+ out_clock_disable_unprepare:
clk_disable_unprepare(priv->clk);
+ out_close_candev:
+ close_candev(dev);
+ out_phy_power_off:
+ phy_power_off(priv->transceiver);
return err;
}
@@ -1129,16 +917,15 @@ static int at91_close(struct net_device *dev)
struct at91_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
- napi_disable(&priv->napi);
+ can_rx_offload_disable(&priv->offload);
at91_chip_stop(dev, CAN_STATE_STOPPED);
free_irq(dev->irq, dev);
clk_disable_unprepare(priv->clk);
+ phy_power_off(priv->transceiver);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1161,7 +948,10 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_open = at91_open,
.ndo_stop = at91_close,
.ndo_start_xmit = at91_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops at91_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static ssize_t mb0_id_show(struct device *dev,
@@ -1256,6 +1046,7 @@ static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_
static int at91_can_probe(struct platform_device *pdev)
{
const struct at91_devtype_data *devtype_data;
+ struct phy *transceiver;
struct net_device *dev;
struct at91_priv *priv;
struct resource *res;
@@ -1304,7 +1095,15 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_iounmap;
}
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver)) {
+ err = PTR_ERR(transceiver);
+ dev_err_probe(&pdev->dev, err, "failed to get phy\n");
+ goto exit_iounmap;
+ }
+
dev->netdev_ops = &at91_netdev_ops;
+ dev->ethtool_ops = &at91_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1320,8 +1119,14 @@ static int at91_can_probe(struct platform_device *pdev)
priv->clk = clk;
priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
+ priv->offload.mailbox_read = at91_mailbox_read;
+ priv->offload.mb_first = devtype_data->rx_first;
+ priv->offload.mb_last = devtype_data->rx_last;
+
+ can_rx_offload_add_timestamp(dev, &priv->offload);
- netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
@@ -1335,8 +1140,6 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
- devm_can_led_init(dev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
@@ -1354,7 +1157,7 @@ static int at91_can_probe(struct platform_device *pdev)
return err;
}
-static int at91_can_remove(struct platform_device *pdev)
+static void at91_can_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_priv *priv = netdev_priv(dev);
@@ -1370,8 +1173,6 @@ static int at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
free_candev(dev);
-
- return 0;
}
static const struct platform_device_id at91_can_id_table[] = {
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
new file mode 100644
index 000000000000..baf494d20bef
--- /dev/null
+++ b/drivers/net/can/bxcan.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// bxcan.c - STM32 Basic Extended CAN controller driver
+//
+// Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com>
+//
+// NOTE: The ST documentation uses the terms master/slave instead of
+// primary/secondary.
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define BXCAN_NAPI_WEIGHT 3
+#define BXCAN_TIMEOUT_US 10000
+
+#define BXCAN_RX_MB_NUM 2
+#define BXCAN_TX_MB_NUM 3
+
+/* Primary control register (MCR) bits */
+#define BXCAN_MCR_RESET BIT(15)
+#define BXCAN_MCR_TTCM BIT(7)
+#define BXCAN_MCR_ABOM BIT(6)
+#define BXCAN_MCR_AWUM BIT(5)
+#define BXCAN_MCR_NART BIT(4)
+#define BXCAN_MCR_RFLM BIT(3)
+#define BXCAN_MCR_TXFP BIT(2)
+#define BXCAN_MCR_SLEEP BIT(1)
+#define BXCAN_MCR_INRQ BIT(0)
+
+/* Primary status register (MSR) bits */
+#define BXCAN_MSR_ERRI BIT(2)
+#define BXCAN_MSR_SLAK BIT(1)
+#define BXCAN_MSR_INAK BIT(0)
+
+/* Transmit status register (TSR) bits */
+#define BXCAN_TSR_RQCP2 BIT(16)
+#define BXCAN_TSR_RQCP1 BIT(8)
+#define BXCAN_TSR_RQCP0 BIT(0)
+
+/* Receive FIFO 0 register (RF0R) bits */
+#define BXCAN_RF0R_RFOM0 BIT(5)
+#define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)
+
+/* Interrupt enable register (IER) bits */
+#define BXCAN_IER_SLKIE BIT(17)
+#define BXCAN_IER_WKUIE BIT(16)
+#define BXCAN_IER_ERRIE BIT(15)
+#define BXCAN_IER_LECIE BIT(11)
+#define BXCAN_IER_BOFIE BIT(10)
+#define BXCAN_IER_EPVIE BIT(9)
+#define BXCAN_IER_EWGIE BIT(8)
+#define BXCAN_IER_FOVIE1 BIT(6)
+#define BXCAN_IER_FFIE1 BIT(5)
+#define BXCAN_IER_FMPIE1 BIT(4)
+#define BXCAN_IER_FOVIE0 BIT(3)
+#define BXCAN_IER_FFIE0 BIT(2)
+#define BXCAN_IER_FMPIE0 BIT(1)
+#define BXCAN_IER_TMEIE BIT(0)
+
+/* Error status register (ESR) bits */
+#define BXCAN_ESR_REC_MASK GENMASK(31, 24)
+#define BXCAN_ESR_TEC_MASK GENMASK(23, 16)
+#define BXCAN_ESR_LEC_MASK GENMASK(6, 4)
+#define BXCAN_ESR_BOFF BIT(2)
+#define BXCAN_ESR_EPVF BIT(1)
+#define BXCAN_ESR_EWGF BIT(0)
+
+/* Bit timing register (BTR) bits */
+#define BXCAN_BTR_SILM BIT(31)
+#define BXCAN_BTR_LBKM BIT(30)
+#define BXCAN_BTR_SJW_MASK GENMASK(25, 24)
+#define BXCAN_BTR_TS2_MASK GENMASK(22, 20)
+#define BXCAN_BTR_TS1_MASK GENMASK(19, 16)
+#define BXCAN_BTR_BRP_MASK GENMASK(9, 0)
+
+/* TX mailbox identifier register (TIxR, x = 0..2) bits */
+#define BXCAN_TIxR_STID_MASK GENMASK(31, 21)
+#define BXCAN_TIxR_EXID_MASK GENMASK(31, 3)
+#define BXCAN_TIxR_IDE BIT(2)
+#define BXCAN_TIxR_RTR BIT(1)
+#define BXCAN_TIxR_TXRQ BIT(0)
+
+/* TX mailbox data length and timestamp register (TDTxR, x = 0..2) bits */
+#define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)
+
+/* RX FIFO mailbox identifier register (RIxR, x = 0..1) bits */
+#define BXCAN_RIxR_STID_MASK GENMASK(31, 21)
+#define BXCAN_RIxR_EXID_MASK GENMASK(31, 3)
+#define BXCAN_RIxR_IDE BIT(2)
+#define BXCAN_RIxR_RTR BIT(1)
+
+/* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */
+#define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16)
+#define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)
+
+#define BXCAN_FMR_REG 0x00
+#define BXCAN_FM1R_REG 0x04
+#define BXCAN_FS1R_REG 0x0c
+#define BXCAN_FFA1R_REG 0x14
+#define BXCAN_FA1R_REG 0x1c
+#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
+#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
+
+#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
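+
+/* The CANSB field of FMR is programmed to 14 in bxcan_enable_filters()
+ * below, so filter banks 0..13 serve the primary controller and banks
+ * from 14 upwards the secondary: the secondary therefore uses bank 14,
+ * all other configurations bank 0.
+ */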
+
+/* Filter primary register (FMR) bits */
+#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
+#define BXCAN_FMR_FINIT BIT(0)
+
+enum bxcan_lec_code {
+ BXCAN_LEC_NO_ERROR = 0,
+ BXCAN_LEC_STUFF_ERROR,
+ BXCAN_LEC_FORM_ERROR,
+ BXCAN_LEC_ACK_ERROR,
+ BXCAN_LEC_BIT1_ERROR,
+ BXCAN_LEC_BIT0_ERROR,
+ BXCAN_LEC_CRC_ERROR,
+ BXCAN_LEC_UNUSED
+};
+
+enum bxcan_cfg {
+ BXCAN_CFG_SINGLE = 0,
+ BXCAN_CFG_DUAL_PRIMARY,
+ BXCAN_CFG_DUAL_SECONDARY
+};
+
+/* Structure of the message buffer */
+struct bxcan_mb {
+ u32 id; /* can identifier */
+ u32 dlc; /* data length control and timestamp */
+ u32 data[2]; /* data */
+};
+
+/* Structure of the hardware registers */
+struct bxcan_regs {
+ u32 mcr; /* 0x00 - primary control */
+ u32 msr; /* 0x04 - primary status */
+ u32 tsr; /* 0x08 - transmit status */
+ u32 rf0r; /* 0x0c - FIFO 0 */
+ u32 rf1r; /* 0x10 - FIFO 1 */
+ u32 ier; /* 0x14 - interrupt enable */
+ u32 esr; /* 0x18 - error status */
+	u32 btr;		/* 0x1c - bit timing */
+ u32 reserved0[88]; /* 0x20 */
+ struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM]; /* 0x180 - tx mailbox */
+ struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM]; /* 0x1b0 - rx mailbox */
+};
+
+struct bxcan_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct bxcan_regs __iomem *regs;
+ struct regmap *gcan;
+ int tx_irq;
+ int sce_irq;
+ enum bxcan_cfg cfg;
+ struct clk *clk;
+ spinlock_t rmw_lock; /* lock for read-modify-write operations */
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ u32 timestamp;
+};
+
+static const struct can_bittiming_const bxcan_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
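+/* Locked read-modify-write of a controller register. For example,
+ * bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ) sets INRQ while
+ * preserving all other MCR bits; the write is skipped when the value
+ * would not change.
+ */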
+static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 old, val;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ old = readl(addr);
+ val = (old & ~clear) | set;
+ if (val != old)
+ writel(val, addr);
+
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
+{
+ unsigned int fid = BXCAN_FILTER_ID(cfg);
+ u32 fmask = BIT(fid);
+
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
+}
+
+static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
+{
+ unsigned int fid = BXCAN_FILTER_ID(cfg);
+ u32 fmask = BIT(fid);
+
+ /* Filter settings:
+ *
+ * Accept all messages.
+ * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier
+ * mask mode with 32 bits width.
+ */
+
+ /* Enter filter initialization mode and assign filters to CAN
+ * controllers.
+ */
+ regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
+ BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT,
+ FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) |
+ BXCAN_FMR_FINIT);
+
+ /* Deactivate filter */
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
+
+ /* Two 32-bit registers in identifier mask mode */
+ regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0);
+
+ /* Single 32-bit scale configuration */
+ regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask);
+
+ /* Assign filter to FIFO 0 */
+ regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0);
+
+ /* Accept all messages */
+ regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0);
+ regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0);
+
+ /* Activate filter */
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask);
+
+ /* Exit filter initialization mode */
+ regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0);
+}
+
+static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv)
+{
+ return priv->tx_head % BXCAN_TX_MB_NUM;
+}
+
+static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv)
+{
+ return priv->tx_tail % BXCAN_TX_MB_NUM;
+}
+
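+/* For illustration: with BXCAN_TX_MB_NUM == 3, tx_head == 5 and
+ * tx_tail == 3, two mailboxes are in flight and bxcan_get_tx_free()
+ * returns 3 - (5 - 3) == 1.
+ */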
+static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv)
+{
+ return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail);
+}
+
+static bool bxcan_tx_busy(const struct bxcan_priv *priv)
+{
+ if (bxcan_get_tx_free(priv) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (bxcan_get_tx_free(priv) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ priv->tx_head, priv->tx_tail,
+ priv->tx_head - priv->tx_tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
+
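+/* The mode-change helpers below share one pattern: flip a request bit
+ * in MCR, then poll the matching acknowledge bit in MSR, sampling
+ * roughly every BXCAN_TIMEOUT_US for at most one second.
+ */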
+static int bxcan_chip_softreset(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_enter_init_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_leave_init_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_enter_sleep_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_leave_sleep_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static inline
+struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct bxcan_priv, offload);
+}
+
+static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
+{
+ struct bxcan_priv *priv = rx_offload_to_priv(offload);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0];
+ struct sk_buff *skb = NULL;
+ struct can_frame *cf;
+ u32 rf0r, id, dlc;
+
+ rf0r = readl(&regs->rf0r);
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
+ goto mark_as_read;
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
+ id = readl(&mb_regs->id);
+ if (id & BXCAN_RIxR_IDE)
+ cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG;
+ else
+ cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK;
+
+ dlc = readl(&mb_regs->dlc);
+ priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc);
+ cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc));
+
+ if (id & BXCAN_RIxR_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ int i, j;
+
+ for (i = 0, j = 0; i < cf->len; i += 4, j++)
+ *(u32 *)(cf->data + i) = readl(&mb_regs->data[j]);
+ }
+
+ mark_as_read:
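+	/* Setting RFOM0 releases the FIFO 0 output mailbox so that the
+	 * hardware can present the next pending message.
+	 */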
+ rf0r |= BXCAN_RF0R_RFOM0;
+ writel(rf0r, &regs->rf0r);
+ return skb;
+}
+
+static irqreturn_t bxcan_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 rf0r;
+
+ rf0r = readl(&regs->rf0r);
+ if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
+ return IRQ_NONE;
+
+ can_rx_offload_irq_offload_fifo(&priv->offload);
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bxcan_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct net_device_stats *stats = &ndev->stats;
+ u32 tsr, rqcp_bit;
+ int idx;
+
+ tsr = readl(&regs->tsr);
+ if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2)))
+ return IRQ_NONE;
+
+ while (priv->tx_head - priv->tx_tail > 0) {
+ idx = bxcan_get_tx_tail(priv);
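+		/* RQCP0/1/2 are 8 bits apart (bits 0, 8, 16), hence idx * 8 */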
+ rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3);
+ if (!(tsr & rqcp_bit))
+ break;
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
+ priv->tx_tail++;
+ }
+
+ writel(tsr, &regs->tsr);
+
+ if (bxcan_get_tx_free(priv)) {
+ /* Make sure that anybody stopping the queue after
+		 * this sees the new tx_tail.
+ */
+ smp_mb();
+ netif_wake_queue(ndev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bxcan_handle_state_change(struct net_device *ndev, u32 esr)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ enum can_state new_state = priv->can.state;
+ struct can_berr_counter bec;
+ enum can_state rx_state, tx_state;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ /* Early exit if no error flag is set */
+ if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF)))
+ return;
+
+ bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
+ bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
+
+ if (esr & BXCAN_ESR_BOFF)
+ new_state = CAN_STATE_BUS_OFF;
+ else if (esr & BXCAN_ESR_EPVF)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (esr & BXCAN_ESR_EWGF)
+ new_state = CAN_STATE_ERROR_WARNING;
+
+ /* state hasn't changed */
+ if (unlikely(new_state == priv->can.state))
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
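+	/* Attribute the state change to the side with the higher error
+	 * counter; on a tie both tx_state and rx_state are reported.
+	 */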
+ tx_state = bec.txerr >= bec.rxerr ? new_state : 0;
+ rx_state = bec.txerr <= bec.rxerr ? new_state : 0;
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb) {
+ int err;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
+ priv->timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ enum bxcan_lec_code lec_code;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr);
+
+ /* Early exit if no lec update or no error.
+ * No lec update means that no CAN bus event has been detected
+ * since CPU wrote BXCAN_LEC_UNUSED value to status reg.
+ */
+ if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR)
+ return;
+
+ /* Common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (lec_code) {
+ case BXCAN_LEC_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case BXCAN_LEC_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case BXCAN_LEC_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+ break;
+
+ case BXCAN_LEC_BIT1_ERROR:
+ netdev_dbg(ndev, "Bit error (recessive)\n");
+ ndev->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+
+ case BXCAN_LEC_BIT0_ERROR:
+ netdev_dbg(ndev, "Bit error (dominant)\n");
+ ndev->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+
+ case BXCAN_LEC_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb) {
+ int err;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
+ priv->timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 msr, esr;
+
+ msr = readl(&regs->msr);
+ if (!(msr & BXCAN_MSR_ERRI))
+ return IRQ_NONE;
+
+ esr = readl(&regs->esr);
+ bxcan_handle_state_change(ndev, esr);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ bxcan_handle_bus_err(ndev, esr);
+
+ msr |= BXCAN_MSR_ERRI;
+ writel(msr, &regs->msr);
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int bxcan_chip_start(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 clr, set;
+ int err;
+
+ err = bxcan_chip_softreset(priv);
+ if (err) {
+ netdev_err(ndev, "failed to reset chip, error %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = bxcan_leave_sleep_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to leave sleep mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_leave_sleep;
+ }
+
+ err = bxcan_enter_init_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to enter init mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_enter_init;
+ }
+
+ /* MCR
+ *
+ * select request order priority
+ * enable time triggered mode
+ * bus-off state left on sw request
+ * sleep mode left on sw request
+ * retransmit automatically on error
+ * do not lock RX FIFO on overrun
+ */
+ bxcan_rmw(priv, &regs->mcr,
+ BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART |
+ BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP);
+
+ /* Bit timing register settings */
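+	/* Each BTR field is encoded as "value minus one", e.g. a brp of 6
+	 * is programmed as 5.
+	 */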
+ set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 +
+ bt->prop_seg - 1) |
+ FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) |
+ FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1);
+
+ /* loopback + silent mode put the controller in test mode,
+ * useful for hot self-test
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ set |= BXCAN_BTR_LBKM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ set |= BXCAN_BTR_SILM;
+
+ bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM |
+ BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
+ BXCAN_BTR_SJW_MASK, set);
+
+ bxcan_enable_filters(priv, priv->cfg);
+
+ /* Clear all internal status */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
+ err = bxcan_leave_init_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to leave init mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_leave_init;
+ }
+
+ /* Set a `lec` value so that we can check for updates later */
+ bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK,
+ FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED));
+
+ /* IER
+ *
+ * Enable interrupt for:
+ * bus-off
+ * passive error
+ * warning error
+ * last error code
+ * RX FIFO pending message
+ * TX mailbox empty
+ */
+ clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 |
+ BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
+ BXCAN_IER_FFIE0;
+ set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE |
+ BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ set |= BXCAN_IER_LECIE;
+ else
+ clr |= BXCAN_IER_LECIE;
+
+ bxcan_rmw(priv, &regs->ier, clr, set);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+
+failed_leave_init:
+failed_enter_init:
+failed_leave_sleep:
+ bxcan_chip_softreset(priv);
+ return err;
+}
+
+static int bxcan_open(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "failed to enable clock, error %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %pe\n",
+ ERR_PTR(err));
+ goto out_disable_clock;
+ }
+
+ can_rx_offload_enable(&priv->offload);
+ err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name,
+ ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register rx irq(%d), error %pe\n",
+ ndev->irq, ERR_PTR(err));
+ goto out_close_candev;
+ }
+
+ err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name,
+ ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register tx irq(%d), error %pe\n",
+ priv->tx_irq, ERR_PTR(err));
+ goto out_free_rx_irq;
+ }
+
+ err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED,
+ ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register sce irq(%d), error %pe\n",
+ priv->sce_irq, ERR_PTR(err));
+ goto out_free_tx_irq;
+ }
+
+ err = bxcan_chip_start(ndev);
+ if (err)
+ goto out_free_sce_irq;
+
+ netif_start_queue(ndev);
+ return 0;
+
+out_free_sce_irq:
+ free_irq(priv->sce_irq, ndev);
+out_free_tx_irq:
+ free_irq(priv->tx_irq, ndev);
+out_free_rx_irq:
+ free_irq(ndev->irq, ndev);
+out_close_candev:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+out_disable_clock:
+ clk_disable_unprepare(priv->clk);
+ return err;
+}
+
+static void bxcan_chip_stop(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+
+ /* disable all interrupts */
+ bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE |
+ BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE |
+ BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
+ BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
+ BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
+ bxcan_disable_filters(priv, priv->cfg);
+ bxcan_enter_sleep_mode(priv);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int bxcan_stop(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ bxcan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->tx_irq, ndev);
+ free_irq(priv->sce_irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct bxcan_mb __iomem *mb_regs;
+ unsigned int idx;
+ u32 id;
+ int i, j;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (bxcan_tx_busy(priv))
+ return NETDEV_TX_BUSY;
+
+ idx = bxcan_get_tx_head(priv);
+ priv->tx_head++;
+ if (bxcan_get_tx_free(priv) == 0)
+ netif_stop_queue(ndev);
+
+ mb_regs = &regs->tx_mb[idx];
+ if (cf->can_id & CAN_EFF_FLAG)
+ id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) |
+ BXCAN_TIxR_IDE;
+ else
+ id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id);
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ id |= BXCAN_TIxR_RTR;
+ } else {
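+		/* Copy the payload 32 bits at a time into the mailbox's
+		 * two data words.
+		 */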
+ for (i = 0, j = 0; i < cf->len; i += 4, j++)
+ writel(*(u32 *)(cf->data + i), &mb_regs->data[j]);
+ }
+
+ writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc);
+
+ can_put_echo_skb(skb, ndev, idx, 0);
+
+ /* Start transmission */
+ writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops bxcan_netdev_ops = {
+ .ndo_open = bxcan_open,
+ .ndo_stop = bxcan_stop,
+ .ndo_start_xmit = bxcan_start_xmit,
+};
+
+static const struct ethtool_ops bxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = bxcan_chip_start(ndev);
+ if (err)
+ return err;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bxcan_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 esr;
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ esr = readl(&regs->esr);
+ bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
+ bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int bxcan_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct bxcan_priv *priv;
+ struct clk *clk = NULL;
+ void __iomem *regs;
+ struct regmap *gcan;
+ enum bxcan_cfg cfg;
+ int err, rx_irq, tx_irq, sce_irq;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "failed to get base address\n");
+ return PTR_ERR(regs);
+ }
+
+ gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan");
+ if (IS_ERR(gcan)) {
+ dev_err(dev, "failed to get shared memory base address\n");
+ return PTR_ERR(gcan);
+ }
+
+ if (of_property_read_bool(np, "st,can-primary"))
+ cfg = BXCAN_CFG_DUAL_PRIMARY;
+ else if (of_property_read_bool(np, "st,can-secondary"))
+ cfg = BXCAN_CFG_DUAL_SECONDARY;
+ else
+ cfg = BXCAN_CFG_SINGLE;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ rx_irq = platform_get_irq_byname(pdev, "rx0");
+ if (rx_irq < 0)
+ return rx_irq;
+
+ tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (tx_irq < 0)
+ return tx_irq;
+
+ sce_irq = platform_get_irq_byname(pdev, "sce");
+ if (sce_irq < 0)
+ return sce_irq;
+
+ ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM);
+ if (!ndev) {
+ dev_err(dev, "alloc_candev() failed\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &bxcan_netdev_ops;
+ ndev->ethtool_ops = &bxcan_ethtool_ops;
+ ndev->irq = rx_irq;
+ ndev->flags |= IFF_ECHO;
+
+ priv->dev = dev;
+ priv->ndev = ndev;
+ priv->regs = regs;
+ priv->gcan = gcan;
+ priv->clk = clk;
+ priv->tx_irq = tx_irq;
+ priv->sce_irq = sce_irq;
+ priv->cfg = cfg;
+ priv->can.clock.freq = clk_get_rate(clk);
+ spin_lock_init(&priv->rmw_lock);
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ priv->can.bittiming_const = &bxcan_bittiming_const;
+ priv->can.do_set_mode = bxcan_do_set_mode;
+ priv->can.do_get_berr_counter = bxcan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->offload.mailbox_read = bxcan_mailbox_read;
+ err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT);
+ if (err) {
+ dev_err(dev, "failed to add FIFO rx_offload\n");
+ goto out_free_candev;
+ }
+
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(dev, "failed to register netdev\n");
+ goto out_can_rx_offload_del;
+ }
+
+ dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq,
+ tx_irq, rx_irq, sce_irq);
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+ return err;
+}
+
+static void bxcan_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ clk_disable_unprepare(priv->clk);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+}
+
+static int __maybe_unused bxcan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
+ bxcan_enter_sleep_mode(priv);
+ priv->can.state = CAN_STATE_SLEEPING;
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused bxcan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ clk_prepare_enable(priv->clk);
+ bxcan_leave_sleep_mode(priv);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume);
+
+static const struct of_device_id bxcan_of_match[] = {
+ {.compatible = "st,stm32f4-bxcan"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bxcan_of_match);
+
+static struct platform_driver bxcan_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &bxcan_pm_ops,
+ .of_match_table = bxcan_of_match,
+ },
+ .probe = bxcan_probe,
+ .remove = bxcan_remove,
+};
+
+module_platform_driver(bxcan_driver);
+
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 962725788b0a..1f0e9acb69ec 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -20,5 +20,6 @@ config CAN_C_CAN_PCI
depends on PCI
help
This driver adds support for the C_CAN/D_CAN chips connected
- to the PCI bus.
+ to the PCI bus. E.g. for the C_CAN controller IP inside the
+ Intel Atom E6xx series IOH (aka EG20T 'PCH CAN').
endif
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 08b6efa7a1a7..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -211,7 +211,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
- u32 dlc[];
};
struct net_device *alloc_c_can_dev(int msg_obj_num);
@@ -224,7 +223,7 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
-void c_can_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops c_can_ethtool_ops;
static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
{
@@ -236,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+	/* This is not a FIFO. C/D_CAN transmits the buffers in priority
+	 * order. The lowest buffer number wins.
+ */
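+	/* e.g. obj_num == 16, head == 5, tail == 2: D_CAN (FIFO) reports
+	 * 13 free objects, while C_CAN can only refill above the highest
+	 * in-flight buffer and reports 16 - 5 == 11.
+	 */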
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 377c7d2e7612..e41167eda673 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -11,16 +11,10 @@
#include "c_can.h"
-static void c_can_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct c_can_priv *priv = netdev_priv(netdev);
- strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
-}
-
static void c_can_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct c_can_priv *priv = netdev_priv(netdev);
@@ -30,12 +24,7 @@ static void c_can_get_ringparam(struct net_device *netdev,
ring->tx_pending = priv->msg_obj_tx_num;
}
-static const struct ethtool_ops c_can_ethtool_ops = {
- .get_drvinfo = c_can_get_drvinfo,
+const struct ethtool_ops c_can_ethtool_ops = {
.get_ringparam = c_can_get_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void c_can_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &c_can_ethtool_ops;
-}
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index 52671d1ea17d..3702cac7fbf0 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -40,7 +40,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "c_can.h"
@@ -403,10 +402,10 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->data[i + 1] = data >> 8;
}
}
- }
+ stats->rx_bytes += frame->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += frame->len;
netif_receive_skb(skb);
return 0;
@@ -430,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -438,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -458,7 +457,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct c_can_tx_ring *tx_ring = &priv->tx;
u32 idx, obj, cmd = IF_COMM_TX;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (c_can_tx_busy(priv, tx_ring))
@@ -466,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -477,7 +476,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* transmit as we might race against do_tx().
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
- priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
obj = idx + priv->msg_obj_tx_first;
c_can_object_put(dev, IF_TX, obj, cmd);
@@ -742,8 +740,7 @@ static void c_can_do_tx(struct net_device *dev)
* NAPI. We are not transmitting.
*/
c_can_inval_tx_object(dev, IF_NAPI, obj);
- can_get_echo_skb(dev, idx, NULL);
- bytes += priv->dlc[idx];
+ bytes += can_get_echo_skb(dev, idx, NULL);
pkts++;
}
@@ -751,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -761,11 +758,9 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_bytes += bytes;
stats->tx_packets += pkts;
- can_led_event(dev, CAN_LED_EVENT_TX);
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
@@ -908,9 +903,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
quota -= n;
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -920,7 +912,6 @@ static int c_can_handle_state_change(struct net_device *dev,
unsigned int reg_err_counter;
unsigned int rx_err_passive;
struct c_can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -960,15 +951,14 @@ static int c_can_handle_state_change(struct net_device *dev,
switch (error_type) {
case C_CAN_NO_ERROR:
- /* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -978,7 +968,7 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -996,8 +986,6 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1023,49 +1011,60 @@ static int c_can_handle_bus_err(struct net_device *dev,
/* common for all type of bus errors */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
break;
default:
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
}
@@ -1189,8 +1188,6 @@ static int c_can_open(struct net_device *dev)
if (err)
goto exit_start_fail;
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
napi_enable(&priv->napi);
/* enable status change, error and module interrupts */
c_can_irq_control(priv, true);
@@ -1221,8 +1218,6 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1232,8 +1227,7 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
struct c_can_priv *priv;
int msg_obj_tx_num = msg_obj_num / 2;
- dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num),
- msg_obj_tx_num);
+ dev = alloc_candev(sizeof(*priv), msg_obj_tx_num);
if (!dev)
return NULL;
@@ -1254,7 +1248,8 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
priv->tx.tail = 0;
priv->tx.obj_num = msg_obj_tx_num;
- netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
+ netif_napi_add_weight(dev, &priv->napi, c_can_poll,
+ priv->msg_obj_rx_num);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
@@ -1367,13 +1362,10 @@ static const struct net_device_ops c_can_netdev_ops = {
.ndo_open = c_can_open,
.ndo_stop = c_can_close,
.ndo_start_xmit = c_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
int register_c_can_dev(struct net_device *dev)
{
- int err;
-
/* Deactivate pins to prevent DRA7 DCAN IP from being
* stuck in transition when module is disabled.
* Pins are activated in c_can_start() and deactivated
@@ -1383,12 +1375,9 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- c_can_set_ethtool_ops(dev);
+ dev->ethtool_ops = &c_can_ethtool_ops;
- err = register_candev(dev);
- if (!err)
- devm_can_led_init(dev);
- return err;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index bf2f8c3da1c1..093bea597f4e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -227,7 +227,6 @@ out_iounmap:
pci_iounmap(pdev, addr);
out_release_regions:
pci_disable_msi(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
@@ -247,7 +246,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
pci_iounmap(pdev, addr);
pci_disable_msi(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 86e95e9d6533..19c86b94a40e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,9 +30,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -259,50 +259,32 @@ static int c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
- const struct of_device_id *match;
struct resource *mem;
int irq;
struct clk *clk;
const struct c_can_driver_data *drvdata;
struct device_node *np = pdev->dev.of_node;
- match = of_match_device(c_can_of_table, &pdev->dev);
- if (match) {
- drvdata = match->data;
- } else if (pdev->id_entry->driver_data) {
- drvdata = (struct c_can_driver_data *)
- platform_get_device_id(pdev)->driver_data;
- } else {
- return -ENODEV;
- }
+ drvdata = device_get_match_data(&pdev->dev);
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto exit;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* get the platform data */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -ENODEV;
- goto exit;
- }
+ if (irq < 0)
+ return irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto exit;
- }
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* allocate the c_can device */
dev = alloc_c_can_dev(drvdata->msg_obj_num);
- if (!dev) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
priv = netdev_priv(dev);
switch (drvdata->id) {
@@ -334,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
+ unsigned int args[2];
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
ret = -EINVAL;
- raminit->syscon = syscon_regmap_lookup_by_phandle(np,
- "syscon-raminit");
+ raminit->syscon = syscon_regmap_lookup_by_phandle_args(np,
+ "syscon-raminit",
+ 2, args);
if (IS_ERR(raminit->syscon)) {
- /* can fail with -EPROBE_DEFER */
ret = PTR_ERR(raminit->syscon);
- free_c_can_dev(dev);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "syscon-raminit", 1,
- &raminit->reg)) {
- dev_err(&pdev->dev,
- "couldn't get the RAMINIT reg. offset!\n");
goto exit_free_device;
}
- if (of_property_read_u32_index(np, "syscon-raminit", 2,
- &id)) {
- dev_err(&pdev->dev,
- "couldn't get the CAN instance ID\n");
- goto exit_free_device;
- }
+ raminit->reg = args[0];
+ id = args[1];
if (id >= drvdata->raminit_num) {
dev_err(&pdev->dev,
@@ -395,23 +366,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
-exit:
- dev_err(&pdev->dev, "probe failed\n");
return ret;
}
-static int c_can_plat_remove(struct platform_device *pdev)
+static void c_can_plat_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct c_can_priv *priv = netdev_priv(dev);
@@ -419,8 +389,6 @@ static int c_can_plat_remove(struct platform_device *pdev)
unregister_c_can_dev(dev);
pm_runtime_disable(priv->device);
free_c_can_dev(dev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
new file mode 100644
index 000000000000..b66fc16aedd2
--- /dev/null
+++ b/drivers/net/can/can327.c
@@ -0,0 +1,1141 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ELM327 based CAN interface driver (tty line discipline)
+ *
+ * This driver started as a derivative of linux/drivers/net/can/slcan.c
+ * and my thanks go to the original authors for their inspiration.
+ *
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/tty.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+
+#define CAN327_NAPI_WEIGHT 4
+
+#define CAN327_SIZE_TXBUF 32
+#define CAN327_SIZE_RXBUF 1024
+
+#define CAN327_CAN_CONFIG_SEND_SFF 0x8000
+#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000
+#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000
+#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000
+
+#define CAN327_DUMMY_CHAR 'y'
+#define CAN327_DUMMY_STRING "y"
+#define CAN327_READY_CHAR '>'
+
+/* Bits in elm->cmds_todo */
+enum can327_tx_do {
+ CAN327_TX_DO_CAN_DATA = 0,
+ CAN327_TX_DO_CANID_11BIT,
+ CAN327_TX_DO_CANID_29BIT_LOW,
+ CAN327_TX_DO_CANID_29BIT_HIGH,
+ CAN327_TX_DO_CAN_CONFIG_PART2,
+ CAN327_TX_DO_CAN_CONFIG,
+ CAN327_TX_DO_RESPONSES,
+ CAN327_TX_DO_SILENT_MONITOR,
+ CAN327_TX_DO_INIT,
+};
+
+struct can327 {
+ /* This must be the first member when using alloc_candev() */
+ struct can_priv can;
+
+ struct can_rx_offload offload;
+
+ /* TTY buffers */
+ u8 txbuf[CAN327_SIZE_TXBUF];
+ u8 rxbuf[CAN327_SIZE_RXBUF];
+
+ /* Per-channel lock */
+ spinlock_t lock;
+
+ /* TTY and netdev devices that we're bridging */
+ struct tty_struct *tty;
+ struct net_device *dev;
+
+ /* TTY buffer accounting */
+ struct work_struct tx_work; /* Flushes TTY TX buffer */
+ u8 *txhead; /* Next TX byte */
+ size_t txleft; /* Bytes left to TX */
+ int rxfill; /* Bytes already RX'd in buffer */
+
+ /* State machine */
+ enum {
+ CAN327_STATE_NOTINIT = 0,
+ CAN327_STATE_GETDUMMYCHAR,
+ CAN327_STATE_GETPROMPT,
+ CAN327_STATE_RECEIVING,
+ } state;
+
+ /* Things we have yet to send */
+ char **next_init_cmd;
+ unsigned long cmds_todo;
+
+ /* The CAN frame and config the ELM327 is sending/using,
+ * or will send/use after finishing all cmds_todo
+ */
+ struct can_frame can_frame_to_send;
+ u16 can_config;
+ u8 can_bitrate_divisor;
+
+ /* Parser state */
+ bool drop_next_line;
+
+ /* Stop the channel on UART side hardware failure, e.g. stray
+	 * characters or never-ending lines. This may be caused by bad
+ * UART wiring, a bad ELM327, a bad UART bridge...
+ * Once this is true, nothing will be sent to the TTY.
+ */
+ bool uart_side_failure;
+};
+
+static inline void can327_uart_side_failure(struct can327 *elm);
+
+static void can327_send(struct can327 *elm, const void *buf, size_t len)
+{
+ int written;
+
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->uart_side_failure)
+ return;
+
+ memcpy(elm->txbuf, buf, len);
+
+	/* The order of the next two lines is *very* important.
+	 * When we are sending a small amount of data,
+	 * the transfer may complete inside the ops->write()
+	 * routine, because it runs with interrupts enabled.
+	 * In that case we would *never* get a WRITE_WAKEUP event
+	 * if we did not request it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ written = elm->tty->ops->write(elm->tty, elm->txbuf, len);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+ return;
+ }
+
+ elm->txleft = len - written;
+ elm->txhead = elm->txbuf + written;
+}
+
+/* Take the ELM327 out of almost any state and back into command mode.
+ * We send CAN327_DUMMY_CHAR which will either abort any running
+ * operation, or be echoed back to us in case we're already in command
+ * mode.
+ */
+static void can327_kick_into_cmd_mode(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->state != CAN327_STATE_GETDUMMYCHAR &&
+ elm->state != CAN327_STATE_GETPROMPT) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+
+ elm->state = CAN327_STATE_GETDUMMYCHAR;
+ }
+}
+
+/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */
+static void can327_send_frame(struct can327 *elm, struct can_frame *frame)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Schedule any necessary changes in ELM327's CAN configuration */
+ if (elm->can_frame_to_send.can_id != frame->can_id) {
+ /* Set the new CAN ID for transmission. */
+ if ((frame->can_id ^ elm->can_frame_to_send.can_id)
+ & CAN_EFF_FLAG) {
+ elm->can_config =
+ (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) |
+ CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF |
+ elm->can_bitrate_divisor;
+
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+ }
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo);
+ } else {
+ set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_LOW,
+ &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH,
+ &elm->cmds_todo);
+ }
+ }
+
+ /* Schedule the CAN frame itself. */
+ elm->can_frame_to_send = *frame;
+ set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+/* ELM327 initialisation sequence.
+ * The line length is limited by the buffer in can327_handle_prompt().
+ */
+static char *can327_init_script[] = {
+ "AT WS\r", /* v1.0: Warm Start */
+ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */
+ "AT M0\r", /* v1.0: Memory Off */
+ "AT AL\r", /* v1.0: Allow Long messages */
+ "AT BI\r", /* v1.0: Bypass Initialisation */
+ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */
+ "AT CFC0\r", /* v1.0: CAN Flow Control Off */
+ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */
+ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */
+ "AT E1\r", /* v1.0: Echo On */
+ "AT H1\r", /* v1.0: Headers On */
+ "AT L0\r", /* v1.0: Linefeeds Off */
+ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */
+ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */
+ "AT AT0\r", /* v1.2: Adaptive Timing Off */
+ "AT D1\r", /* v1.3: Print DLC On */
+ "AT S1\r", /* v1.3: Spaces On */
+ "AT TP B\r", /* v1.0: Try Protocol B */
+ NULL
+};
+
+static void can327_init_device(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ elm->state = CAN327_STATE_NOTINIT;
+ elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */
+ elm->rxfill = 0;
+ elm->drop_next_line = 0;
+
+ /* We can only set the bitrate as a fraction of 500000.
+ * The bitrates listed in can327_bitrate_const will
+ * limit the user to the right values.
+ */
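+	/* e.g. a configured bitrate of 250000 yields a divisor of 2 */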
+ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate;
+ elm->can_config =
+ CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor;
+
+ /* Configure ELM327 and then start monitoring */
+ elm->next_init_cmd = &can327_init_script[0];
+ set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (!netif_running(elm->dev)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Queue for NAPI pickup.
+ * rx-offload will update stats and LEDs for us.
+ */
+ if (can_rx_offload_queue_tail(&elm->offload, skb))
+ elm->dev->stats.rx_fifo_errors++;
+
+ /* Wake NAPI */
+ can_rx_offload_irq_finish(&elm->offload);
+}
+
+/* Called when we're out of ideas and just want it all to end. */
+static inline void can327_uart_side_failure(struct can327 *elm)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ elm->uart_side_failure = true;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ elm->can.can_stats.bus_off++;
+ netif_stop_queue(elm->dev);
+ elm->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(elm->dev);
+
+ netdev_err(elm->dev,
+ "ELM327 misbehaved. Blocking further communication.\n");
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ return;
+
+ frame->can_id |= CAN_ERR_BUSOFF;
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Compares a byte buffer (non-NUL terminated) to the payload part of
+ * a string, and returns true iff the buffer (content *and* length) is
+ * exactly that string, without the terminating NUL byte.
+ *
+ * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9
+ * and !memcmp(buf, "BUS ERROR", 9).
+ *
+ * The reason to use strings is so we can easily include them in the C
+ * code, and to avoid hardcoding lengths.
+ */
+static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes,
+ const char *reference)
+{
+ size_t ref_len = strlen(reference);
+
+ return (nbytes == ref_len) && !memcmp(buf, reference, ref_len);
+}
+
+static void can327_parse_error(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ /* It's okay to return here:
+ * The outer parsing loop will drop this UART buffer.
+ */
+ return;
+
+	/* Match the RX'd line (content and length) against known error messages */
+ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) {
+ netdev_err(elm->dev,
+ "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n");
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) {
+ /* This will only happen if the last data line was complete.
+ * Otherwise, can327_parse_frame() will heuristically
+ * emit this kind of error frame instead.
+ */
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) {
+ frame->can_id |= CAN_ERR_BUSERROR;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_OVERLOAD;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_TX;
+ } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) {
+ /* ERR is followed by two digits, hence line length 5 */
+ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n",
+ elm->rxbuf[3], elm->rxbuf[4]);
+ frame->can_id |= CAN_ERR_CRTL;
+ } else {
+ /* Something else has happened.
+ * Maybe garbage on the UART line.
+ * Emit a generic error frame.
+ */
+ }
+
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Parse CAN frames coming as ASCII from ELM327.
+ * They can be of various formats:
+ *
+ * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL
+ * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL
+ *
+ * where D = DLC, PL = payload byte
+ *
+ * Instead of a payload, RTR indicates a remote request.
+ *
+ * We will use the spaces and line length to guess the format.
+ */
+static int can327_parse_frame(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ int hexlen;
+ int datastart;
+ int i;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_skb(elm->dev, &frame);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Find first non-hex and non-space character:
+ * - In the simplest case, there is none.
+ * - For RTR frames, 'R' is the first non-hex character.
+ * - An error message may replace the end of the data line.
+ */
+ for (hexlen = 0; hexlen <= len; hexlen++) {
+ if (hex_to_bin(elm->rxbuf[hexlen]) < 0 &&
+ elm->rxbuf[hexlen] != ' ') {
+ break;
+ }
+ }
+
+ /* Sanity check whether the line is really a clean hexdump,
+ * or terminated by an error message, or contains garbage.
+ */
+ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) &&
+ !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] &&
+ ' ' != elm->rxbuf[hexlen]) {
+ /* The line is likely garbled anyway, so bail.
+ * The main code will restart listening.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+	/* Use the spaces in the CAN ID to distinguish 29 bit from 11 bit
+	 * address length.
+	 * No out-of-bounds access: elm->rxbuf is a fixed-size array, so
+	 * reading these small fixed offsets is always memory-safe, even when
+	 * the line is shorter; a stray match is caught by the datastart
+	 * check below.
+	 */
+ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' &&
+ elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' &&
+ elm->rxbuf[13] == ' ') {
+ frame->can_id = CAN_EFF_FLAG;
+ datastart = 14;
+ } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') {
+ datastart = 6;
+ } else {
+ /* This is not a well-formatted data line.
+ * Assume it's an error message.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
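+	/* Mapping the formats from the comment above onto offsets:
+	 * EFF: "12 34 56 78 D PL ..." - spaces at 2, 5, 8, 11, 13; data at 14
+	 * SFF: "123 D PL ..."         - spaces at 3 and 5; data at 6
+	 */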
+
+ if (hexlen < datastart) {
+ /* The line is too short to be a valid frame hex dump.
+ * Something interrupted the hex dump or it is invalid.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* From here on all chars up to buf[hexlen] are hex or spaces,
+ * at well-defined offsets.
+ */
+
+ /* Read CAN data length */
+ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0);
+
+ /* Read CAN ID */
+ if (frame->can_id & CAN_EFF_FLAG) {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) |
+ (hex_to_bin(elm->rxbuf[1]) << 24) |
+ (hex_to_bin(elm->rxbuf[3]) << 20) |
+ (hex_to_bin(elm->rxbuf[4]) << 16) |
+ (hex_to_bin(elm->rxbuf[6]) << 12) |
+ (hex_to_bin(elm->rxbuf[7]) << 8) |
+ (hex_to_bin(elm->rxbuf[9]) << 4) |
+ (hex_to_bin(elm->rxbuf[10]) << 0);
+ } else {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) |
+ (hex_to_bin(elm->rxbuf[1]) << 4) |
+ (hex_to_bin(elm->rxbuf[2]) << 0);
+ }
+
+ /* Check for RTR frame */
+ if (elm->rxfill >= hexlen + 3 &&
+ !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) {
+ frame->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Is the line long enough to hold the advertised payload?
+ * Note: RTR frames have a DLC, but no actual payload.
+ */
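+	/* Each payload byte takes three characters in the dump ("XX" plus a
+	 * separating space), which is where frame->len * 3 comes from.
+	 */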
+ if (!(frame->can_id & CAN_RTR_FLAG) &&
+ (hexlen < frame->len * 3 + datastart)) {
+ /* Incomplete frame.
+ * Probably the ELM327's RS232 TX buffer was full.
+ * Emit an error frame and exit.
+ */
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->len = CAN_ERR_DLC;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ can327_feed_frame_to_netdev(elm, skb);
+
+ /* Signal failure to parse.
+ * The line will be re-parsed as an error line, which will fail.
+ * However, this will correctly drop the state machine back into
+ * command mode.
+ */
+ return -ENODATA;
+ }
+
+ /* Parse the data nibbles. */
+ for (i = 0; i < frame->len; i++) {
+ frame->data[i] =
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) |
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1]));
+ }
+
+ /* Feed the frame to the network layer. */
+ can327_feed_frame_to_netdev(elm, skb);
+
+ return 0;
+}
+
+static void can327_parse_line(struct can327 *elm, size_t len)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Skip empty lines */
+ if (!len)
+ return;
+
+ /* Skip echo lines */
+ if (elm->drop_next_line) {
+ elm->drop_next_line = 0;
+ return;
+ } else if (!memcmp(elm->rxbuf, "AT", 2)) {
+ return;
+ }
+
+ /* Regular parsing */
+ if (elm->state == CAN327_STATE_RECEIVING &&
+ can327_parse_frame(elm, len)) {
+ /* Parse an error line. */
+ can327_parse_error(elm, len);
+
+ /* Start afresh. */
+ can327_kick_into_cmd_mode(elm);
+ }
+}
+
+static void can327_handle_prompt(struct can327 *elm)
+{
+ struct can_frame *frame = &elm->can_frame_to_send;
+ /* Size this buffer for the largest ELM327 line we may generate,
+ * which is currently an 8 byte CAN frame's payload hexdump.
+ * Items in can327_init_script must fit here, too!
+ */
+ char local_txbuf[sizeof("0102030405060708\r")];
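+	/* Note: sizeof() counts the terminating NUL, so this buffer holds
+	 * 16 hex digits plus '\r' plus '\0' = 18 bytes.
+	 */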
+
+ lockdep_assert_held(&elm->lock);
+
+ if (!elm->cmds_todo) {
+ /* Enter CAN monitor mode */
+ can327_send(elm, "ATMA\r", 5);
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+
+ return;
+ }
+
+ /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */
+ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf), "%s",
+ *elm->next_init_cmd);
+
+ elm->next_init_cmd++;
+ if (!(*elm->next_init_cmd)) {
+ clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ /* Init finished. */
+ }
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCSM%i\r",
+ !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATR%i\r",
+ !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPC\r");
+ set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPB%04X\r",
+ elm->can_config);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCP%02X\r",
+ (frame->can_id & CAN_EFF_MASK) >> 24);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%06X\r",
+ frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%03X\r",
+ frame->can_id & CAN_SFF_MASK);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) {
+ if (frame->can_id & CAN_RTR_FLAG) {
+			/* Send an RTR frame. The DLC of RTR frames is fixed;
+			 * some chips don't send them at all.
+			 */
+ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r");
+ } else {
+ /* Send a regular CAN data frame */
+ int i;
+
+ for (i = 0; i < frame->len; i++) {
+ snprintf(&local_txbuf[2 * i],
+ sizeof(local_txbuf), "%02X",
+ frame->data[i]);
+ }
+
+ snprintf(&local_txbuf[2 * i], sizeof(local_txbuf),
+ "\r");
+ }
+
+ elm->drop_next_line = 1;
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+ }
+
+ can327_send(elm, local_txbuf, strlen(local_txbuf));
+}
+
+static bool can327_is_ready_char(char c)
+{
+ /* Bits 0xc0 are sometimes set (randomly), hence the mask.
+ * Probably bad hardware.
+ */
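+	/* CAN327_READY_CHAR is the ELM327's '>' prompt (ASCII 0x3e), so the
+	 * 0x3f mask also accepts 0x7e, 0xbe and 0xfe, i.e. '>' with either
+	 * or both of the top two bits flipped on.
+	 */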
+ return (c & 0x3f) == CAN327_READY_CHAR;
+}
+
+static void can327_drop_bytes(struct can327 *elm, size_t i)
+{
+ lockdep_assert_held(&elm->lock);
+
+ memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i);
+ elm->rxfill -= i;
+}
+
+static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx)
+{
+ size_t len, pos;
+
+ lockdep_assert_held(&elm->lock);
+
+ switch (elm->state) {
+ case CAN327_STATE_NOTINIT:
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_GETDUMMYCHAR:
+ /* Wait for 'y' or '>' */
+ for (pos = 0; pos < elm->rxfill; pos++) {
+ if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) {
+ can327_send(elm, "\r", 1);
+ elm->state = CAN327_STATE_GETPROMPT;
+ pos++;
+ break;
+ } else if (can327_is_ready_char(elm->rxbuf[pos])) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ pos++;
+ break;
+ }
+ }
+
+ can327_drop_bytes(elm, pos);
+ break;
+
+ case CAN327_STATE_GETPROMPT:
+ /* Wait for '>' */
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1]))
+ can327_handle_prompt(elm);
+
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_RECEIVING:
+ /* Find <CR> delimiting feedback lines. */
+ len = first_new_char_idx;
+ while (len < elm->rxfill && elm->rxbuf[len] != '\r')
+ len++;
+
+ if (len == CAN327_SIZE_RXBUF) {
+ /* Assume the buffer ran full with garbage.
+ * Did we even connect at the right baud rate?
+ */
+ netdev_err(elm->dev,
+ "RX buffer overflow. Faulty ELM327 or UART?\n");
+ can327_uart_side_failure(elm);
+ } else if (len == elm->rxfill) {
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) {
+ /* The ELM327's AT ST response timeout ran out,
+ * so we got a prompt.
+ * Clear RX buffer and restart listening.
+ */
+ elm->rxfill = 0;
+
+ can327_handle_prompt(elm);
+ }
+
+ /* No <CR> found - we haven't received a full line yet.
+ * Wait for more data.
+ */
+ } else {
+ /* We have a full line to parse. */
+ can327_parse_line(elm, len);
+
+ /* Remove parsed data from RX buffer. */
+ can327_drop_bytes(elm, len + 1);
+
+ /* More data to parse? */
+ if (elm->rxfill)
+ can327_parse_rxbuf(elm, 0);
+ }
+ }
+}
+
+static int can327_netdev_open(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&elm->lock);
+
+ if (!elm->tty) {
+ spin_unlock_bh(&elm->lock);
+ return -ENODEV;
+ }
+
+ if (elm->uart_side_failure)
+ netdev_warn(elm->dev,
+ "Reopening netdev after a UART side fault has been detected.\n");
+
+ /* Clear TTY buffers */
+ elm->rxfill = 0;
+ elm->txleft = 0;
+
+ /* open_candev() checks for elm->can.bittiming.bitrate != 0 */
+ err = open_candev(dev);
+ if (err) {
+ spin_unlock_bh(&elm->lock);
+ return err;
+ }
+
+ can327_init_device(elm);
+ spin_unlock_bh(&elm->lock);
+
+ err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT);
+ if (err) {
+ close_candev(dev);
+ return err;
+ }
+
+ can_rx_offload_enable(&elm->offload);
+
+ elm->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int can327_netdev_close(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+
+ /* Interrupt whatever the ELM327 is doing right now */
+ spin_lock_bh(&elm->lock);
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ spin_unlock_bh(&elm->lock);
+
+ netif_stop_queue(dev);
+
+ /* We don't flush the UART TX queue here, as we want final stop
+ * commands (like the above dummy char) to be flushed out.
+ */
+
+ can_rx_offload_disable(&elm->offload);
+ elm->can.state = CAN_STATE_STOPPED;
+ can_rx_offload_del(&elm->offload);
+ close_candev(dev);
+
+ return 0;
+}
+
+/* Send a can_frame to a TTY. */
+static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* We shouldn't get here after a hardware fault:
+ * can_bus_off() calls netif_carrier_off()
+ */
+ if (elm->uart_side_failure) {
+ WARN_ON_ONCE(elm->uart_side_failure);
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ /* BHs are already disabled, so no spin_lock_bh().
+ * See Documentation/networking/netdevices.rst
+ */
+ spin_lock(&elm->lock);
+ can327_send_frame(elm, frame);
+ spin_unlock(&elm->lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops can327_netdev_ops = {
+ .ndo_open = can327_netdev_open,
+ .ndo_stop = can327_netdev_close,
+ .ndo_start_xmit = can327_netdev_start_xmit,
+};
+
+static const struct ethtool_ops can327_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static bool can327_is_valid_rx_char(u8 c)
+{
+ static const bool lut_char_is_valid['z'] = {
+ ['\r'] = true,
+ [' '] = true,
+ ['.'] = true,
+ ['0'] = true, true, true, true, true,
+ ['5'] = true, true, true, true, true,
+ ['<'] = true,
+ [CAN327_READY_CHAR] = true,
+ ['?'] = true,
+ ['A'] = true, true, true, true, true, true, true,
+ ['H'] = true, true, true, true, true, true, true,
+ ['O'] = true, true, true, true, true, true, true,
+ ['V'] = true, true, true, true, true,
+ ['a'] = true,
+ ['b'] = true,
+ ['v'] = true,
+ [CAN327_DUMMY_CHAR] = true,
+ };
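+	/* The table is indexed by the received byte and sized ['z'] because
+	 * its highest initialised entry is CAN327_DUMMY_CHAR (presumably 'y',
+	 * per the GETDUMMYCHAR handling), which the BUILD_BUG_ON below
+	 * sanity-checks.
+	 */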
+ BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z');
+
+ return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]);
+}
+
+/* Handle incoming ELM327 ASCII data.
+ * This will not be re-entered while running, but other ldisc
+ * functions may be called in parallel.
+ */
+static void can327_ldisc_rx(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
+{
+ struct can327 *elm = tty->disc_data;
+ size_t first_new_char_idx;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ /* Store old rxfill, so can327_parse_rxbuf() will have
+ * the option of skipping already checked characters.
+ */
+ first_new_char_idx = elm->rxfill;
+
+ while (count--) {
+ if (elm->rxfill >= CAN327_SIZE_RXBUF) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %zu",
+ count);
+ goto uart_failure;
+ }
+ if (fp && *fp++) {
+ netdev_err(elm->dev,
+ "Error in received character stream. Check your wiring.");
+ goto uart_failure;
+ }
+
+ /* Ignore NUL characters, which the PIC microcontroller may
+ * inadvertently insert due to a known hardware bug.
+ * See ELM327 documentation, which refers to a Microchip PIC
+ * bug description.
+ */
+ if (*cp) {
+ /* Check for stray characters on the UART line.
+ * Likely caused by bad hardware.
+ */
+ if (!can327_is_valid_rx_char(*cp)) {
+ netdev_err(elm->dev,
+ "Received illegal character %02x.\n",
+ *cp);
+ goto uart_failure;
+ }
+
+ elm->rxbuf[elm->rxfill++] = *cp;
+ }
+
+ cp++;
+ }
+
+ can327_parse_rxbuf(elm, first_new_char_idx);
+ spin_unlock_bh(&elm->lock);
+
+ return;
+uart_failure:
+ can327_uart_side_failure(elm);
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Write out remaining transmit buffer.
+ * Scheduled when TTY is writable.
+ */
+static void can327_ldisc_tx_worker(struct work_struct *work)
+{
+ struct can327 *elm = container_of(work, struct can327, tx_work);
+ ssize_t written;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ if (elm->txleft) {
+ written = elm->tty->ops->write(elm->tty, elm->txhead,
+ elm->txleft);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->txleft -= written;
+ elm->txhead += written;
+ }
+
+ if (!elm->txleft)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Called by the driver when there's room for more data. */
+static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct can327 *elm = tty->disc_data;
+
+ schedule_work(&elm->tx_work);
+}
+
+/* ELM327 can only handle bitrates that are integer divisors of 500 kHz,
+ * or 7/8 of that. Divisors are 1 to 64.
+ * Currently we don't implement support for 7/8 rates.
+ */
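+/* Each entry below is 500000 / n, rounded down, for n = 64 down to 1,
+ * e.g. 500000 / 64 = 7812. can327_init_device() recovers n by integer
+ * division.
+ */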
+static const u32 can327_bitrate_const[] = {
+ 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771,
+ 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204,
+ 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195,
+ 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151,
+ 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000,
+ 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411,
+ 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555,
+ 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000
+};
+
+static int can327_ldisc_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct can327 *elm;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(struct can327), 0);
+ if (!dev)
+ return -ENFILE;
+ elm = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ spin_lock_init(&elm->lock);
+ INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker);
+
+ /* Configure CAN metadata */
+ elm->can.bitrate_const = can327_bitrate_const;
+ elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const);
+ elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ elm->dev = dev;
+ dev->netdev_ops = &can327_netdev_ops;
+ dev->ethtool_ops = &can327_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ elm->tty = tty;
+ tty->disc_data = elm;
+
+ /* Let 'er rip */
+ err = register_candev(elm->dev);
+ if (err) {
+ free_candev(elm->dev);
+ return err;
+ }
+
+ netdev_info(elm->dev, "can327 on %s.\n", tty->name);
+
+ return 0;
+}
+
+/* Close down a can327 channel.
+ * This means flushing out any pending queues, and then returning.
+ * This call is serialized against other ldisc functions:
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this function for a hangup event.
+ */
+static void can327_ldisc_close(struct tty_struct *tty)
+{
+ struct can327 *elm = tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to. */
+ unregister_candev(elm->dev);
+
+ /* Give UART one final chance to flush.
+ * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is
+ * serialised against .close() and will not be called once we return.
+ */
+ flush_work(&elm->tx_work);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&elm->lock);
+ tty->disc_data = NULL;
+ elm->tty = NULL;
+ spin_unlock_bh(&elm->lock);
+
+ netdev_info(elm->dev, "can327 off %s.\n", tty->name);
+
+ free_candev(elm->dev);
+}
+
+static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct can327 *elm = tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1;
+ if (copy_to_user((void __user *)arg, elm->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops can327_ldisc = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .num = N_CAN327,
+ .receive_buf = can327_ldisc_rx,
+ .write_wakeup = can327_ldisc_tx_wakeup,
+ .open = can327_ldisc_open,
+ .close = can327_ldisc_close,
+ .ioctl = can327_ldisc_ioctl,
+};
+
+static int __init can327_init(void)
+{
+ int status;
+
+ status = tty_register_ldisc(&can327_ldisc);
+ if (status)
+ pr_err("Can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit can327_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&can327_ldisc);
+}
+
+module_init(can327_init);
+module_exit(can327_exit);
+
+MODULE_ALIAS_LDISC(N_CAN327);
+MODULE_DESCRIPTION("ELM327 based CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Staudt <max@enpas.org>");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 9ef1359319f0..aae25c2f849e 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,6 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
+ depends on HAS_IOPORT
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index f8a130f594e2..8d5abd643c06 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -428,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cc770_priv *priv = netdev_priv(dev);
unsigned int mo = obj2msgobj(CC770_OBJ_TX);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -489,17 +490,17 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
cf->len = can_cc_dlc2len((config & 0xf0) >> 4);
for (i = 0; i < cf->len; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 lec;
@@ -512,6 +513,7 @@ static int cc770_err(struct net_device *dev, u8 status)
/* Use extended functions of the CC770 */
if (priv->control_normal_mode & CTRL_EAF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = cc770_read_reg(priv, tx_error_counter);
cf->data[7] = cc770_read_reg(priv, rx_error_counter);
}
@@ -571,8 +573,6 @@ static int cc770_err(struct net_device *dev, u8 status)
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -666,7 +666,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
- struct can_frame *cf;
u8 ctrl1;
ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
@@ -698,12 +697,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
return;
}
- cf = (struct can_frame *)priv->tx_skb->data;
- stats->tx_bytes += cf->len;
- stats->tx_packets++;
-
can_put_echo_skb(priv->tx_skb, dev, 0, 0);
- can_get_echo_skb(dev, 0, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
+ stats->tx_packets++;
priv->tx_skb = NULL;
netif_wake_queue(dev);
@@ -838,7 +834,10 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_open = cc770_open,
.ndo_stop = cc770_close,
.ndo_start_xmit = cc770_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops cc770_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
int register_cc770dev(struct net_device *dev)
@@ -851,6 +850,7 @@ int register_cc770dev(struct net_device *dev)
return err;
dev->netdev_ops = &cc770_netdev_ops;
+ dev->ethtool_ops = &cc770_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 194c86e0f340..d06762817153 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -264,26 +264,28 @@ static int cc770_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"couldn't register device (err=%d)\n", err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_cc770dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
-static int cc770_isa_remove(struct platform_device *pdev)
+static void cc770_isa_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct cc770_priv *priv = netdev_priv(dev);
@@ -301,8 +303,6 @@ static int cc770_isa_remove(struct platform_device *pdev)
release_region(port[idx], CC770_IOSIZE);
}
free_cc770dev(dev);
-
- return 0;
}
static struct platform_driver cc770_isa_driver = {
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 8d916e2ee6c2..b6c4f02ffb97 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -70,17 +70,10 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
static int cc770_get_of_node_data(struct platform_device *pdev,
struct cc770_priv *priv)
{
+ u32 clkext = CC770_PLATFORM_CAN_CLOCK, clkout = 0;
struct device_node *np = pdev->dev.of_node;
- const u32 *prop;
- int prop_size;
- u32 clkext;
-
- prop = of_get_property(np, "bosch,external-clock-frequency",
- &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- clkext = *prop;
- else
- clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+
+ of_property_read_u32(np, "bosch,external-clock-frequency", &clkext);
priv->can.clock.freq = clkext;
/* The system clock may not exceed 10 MHz */
@@ -93,41 +86,38 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (priv->can.clock.freq > 8000000)
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ if (of_property_read_bool(np, "bosch,divide-memory-clock"))
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
- if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ if (!of_property_read_bool(np, "bosch,no-comperator-bypass"))
priv->bus_config |= BUSCFG_CBY;
- if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
- if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx1-input"))
priv->bus_config |= BUSCFG_DR1;
- if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-tx1-output"))
priv->bus_config |= BUSCFG_DT1;
- if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
- prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
- u32 cdv = clkext / *prop;
- int slew;
+ of_property_read_u32(np, "bosch,clock-out-frequency", &clkout);
+ if (clkout > 0) {
+ u32 cdv = clkext / clkout;
if (cdv > 0 && cdv < 16) {
+ u32 slew;
+
priv->cpu_interface |= CPUIF_CEN;
priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
- prop = of_get_property(np, "bosch,slew-rate",
- &prop_size);
- if (prop && (prop_size == sizeof(u32))) {
- slew = *prop;
- } else {
+ if (of_property_read_u32(np, "bosch,slew-rate", &slew)) {
/* Determine default slew rate */
slew = (CLKOUT_SL_MASK >>
CLKOUT_SL_SHIFT) -
((cdv * clkext - 1) / 8000000);
- if (slew < 0)
+ if (slew > (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT))
slew = 0;
}
priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
@@ -230,7 +220,7 @@ exit_release_mem:
return err;
}
-static int cc770_platform_remove(struct platform_device *pdev)
+static void cc770_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct cc770_priv *priv = netdev_priv(dev);
@@ -242,8 +232,6 @@ static int cc770_platform_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
-
- return 0;
}
static const struct of_device_id cc770_platform_table[] = {
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..f52407f5c5d8
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core" if COMPILE_TEST
+ help
+ This driver adds support for the CTU CAN FD open-source IP core.
+	  More documentation and the core sources are available at the project
+	  page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+	  An integration of the core into a Xilinx Zynq system as a platform
+	  driver is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top).
+	  Implementations are available for an Intel FPGA-based PCI Express
+	  board (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and for an
+	  Intel SoC (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd).
+	  A guidepost to the CTU FEE CAN bus projects is at https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on PCI
+ select CAN_CTUCANFD
+ help
+	  This driver adds PCI/PCIe support for the CTU CAN-FD IP core.
+	  The project providing the FPGA design for the Intel EP4CGX15 based
+	  DB4CGX15 PCIe board, with a transceiver riser shield designed by
+	  PiKRON.com, is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on HAS_IOMEM && OF
+ select CAN_CTUCANFD
+ help
+	  The core has been tested together with an OpenCores SJA1000
+	  modified to tolerate CAN FD frames, on MicroZed Zynq based
+	  MZ_APO education kits designed by Petr Porazil from the PiKRON.com
+	  company. FPGA design: https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+	  The kit is described at the Computer Architectures course pages,
+	  https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr: Base address of CTU CAN FD core address
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..1e6b9e3dc2fe
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to empty state, rotate order and priorities
+ * - if more buffers seem to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
+ * priorities that way)
+ * - at frame filling, do not rotate anything, just increment buffer modulo counter
+ */
+
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+		if (device_id == CTUCANFD_ID)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+ /* The timing calculation functions have only constraints on tseg1, which is prop_seg +
+	 * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
+ */
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
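+	/* Illustrative example: with nominal timing (max_ph1_len = 63), a
+	 * computed phase_seg1 of 70 moves 7 time quanta into prop_seg and
+	 * clamps phase_seg1 to 63; the total tseg1 stays the same.
+	 */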
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+	/* Use SSP for bit rates above 1 Mbit/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
+
+/**
+ * ctucan_set_mode() - Sets the CTU CAN FD's mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * The routine expects the chip to be in reset state. It sets up the
+ * initial Tx buffer FIFO priorities, sets the bit timing, enables
+ * interrupts, switches the core to operational mode and changes the
+ * controller state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the driver's state and calls the routine that sets the corresponding mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be
+ * inserted to TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts frame to TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return:
+ * * True - Frame inserted successfully
+ * * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
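+	/* Each TXT buffer maps to a 0x100-byte register window; buffer 0
+	 * starts at offset 0x100, as the computation below implies.
+	 */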
+ buf_base = (buf + 1) * 0x100;
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
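+	/* wc = RWCNT minus the identifier word and the two timestamp words,
+	 * i.e. the number of 32-bit data words; RWCNT evidently does not
+	 * count FRAME_FORMAT_W, which was read separately.
+	 */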
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (poll) to process Rx frames. It does minimal
+ * processing and invokes "netif_receive_skb" to complete further processing.
+ * Return: 1 when a frame is passed to the network layer, 0 when the first frame word has been
+ * read but the system is temporarily out of free SKBs, leaving the SKB allocation to be
+ * resolved later, -%EAGAIN in case of an empty Rx FIFO.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads CTU CAN FD's fault confinement state.
+ * @priv: Pointer to private data
+ *
+ * Returns: Fault confinement state of controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
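+
+/* Example, assuming the usual error warning limit of 96: with REC = 10 and
+ * TEC = 100 the controller still reports ERA (error-active fault state),
+ * but the function returns CAN_STATE_ERROR_WARNING because ERROR_ACTIVE
+ * additionally requires both counters to be below the limit.
+ */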
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt; it checks the type of error and forwards an error
+ * frame to the upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous state is the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the RX part. It processes at most @quota packets per call.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
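+
+/* Worked example, assuming four TXT buffers: rotating the priority word
+ * 0x4567 (TXT4P..TXT1P = 4, 5, 6, 7) yields low nibbles 0x5674, i.e. the
+ * buffer that just finished (priority 7) drops to the lowest priority and
+ * the next pending buffer becomes the highest, preserving FIFO order.
+ * Only the nibble of buffer ntxbufs - 1 wraps back; higher nibbles never
+ * re-enter the low part of the word.
+ */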
+
+/**
+ * ctucan_tx_interrupt() - TX done ISR
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+ /* This indicates that the retransmit limit has been reached. Obviously
+ * we should not echo the frame, but also not indicate any kind of
+ * error. If desired, it was already reported (possibly multiple
+ * times) on each arbitration loss.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+ /* Bug only if the first buffer is not finished, otherwise it is
+ * pretty much expected.
+ */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+ /* Clear the interrupt again. We do not want to receive the interrupt again
+ * for a buffer already handled. If it were the last finished one, a
+ * spurious interrupt would be logged.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
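+
+/* The txb_head/txb_tail counters above are free-running u32s; the number of
+ * frames in flight is their unsigned difference, which stays correct across
+ * a wrap, e.g. head = 0x80000002 and tail = 0x7ffffffe still give
+ * (int)(head - tail) = 4, and "% ntxbufs" maps each counter to a HW buffer.
+ */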
+
+/**
+ * ctucan_interrupt() - CAN ISR
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding ISR.
+ *
+ * Return:
+ * IRQ_NONE - if no interrupt was pending (e.g. a shared IRQ fired for another
+ * device), IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+ /* Mask RXBNEI first, then clear the interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always read 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+};
+
+static const struct ethtool_ops ctucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+ ndev->ethtool_ops = &ctucan_ethtool_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+ /* Check for big-endianness and set the corresponding I/O accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
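+
+ /* Illustration: the first read uses the little-endian accessors; if the
+ * low 16 bits do not match CTUCANFD_ID, the accessors are switched to the
+ * _be variants and the signature check is repeated once before giving up
+ * with -ENODEV, so both straight and byte-swapped bus mappings work.
+ */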
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ pm_runtime_put(dev);
+
+ netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..0c181ab51bf8
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TXTB_INFO = 0x76,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_RETR_CTR = 0x7d,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TS_INFO = 0x7f,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_TTTM BIT(5)
+#define REG_MODE_ROM BIT(6)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RXBAM BIT(9)
+#define REG_MODE_SAM BIT(11)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+#define REG_STATUS_STCNT BIT(16)
+
+/* COMMAND registers */
+#define REG_COMMAND_RXRPMV BIT(1)
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+#define REG_TX_STATUS_TX5S GENMASK(19, 16)
+#define REG_TX_STATUS_TX6S GENMASK(23, 20)
+#define REG_TX_STATUS_TX7S GENMASK(27, 24)
+#define REG_TX_STATUS_TX8S GENMASK(31, 28)
+
+/* TX_COMMAND TXTB_INFO registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+#define REG_TX_COMMAND_TXB5 BIT(12)
+#define REG_TX_COMMAND_TXB6 BIT(13)
+#define REG_TX_COMMAND_TXB7 BIT(14)
+#define REG_TX_COMMAND_TXB8 BIT(15)
+#define REG_TX_COMMAND_TXT_BUFFER_COUNT GENMASK(19, 16)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+#define REG_TX_PRIORITY_TXT5P GENMASK(18, 16)
+#define REG_TX_PRIORITY_TXT6P GENMASK(22, 20)
+#define REG_TX_PRIORITY_TXT7P GENMASK(26, 24)
+#define REG_TX_PRIORITY_TXT8P GENMASK(30, 28)
+
+/* ERR_CAPT RETR_CTR ALC TS_INFO registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_RETR_CTR_VAL GENMASK(11, 8)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+#define REG_ERR_CAPT_TS_BITS GENMASK(29, 24)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..9da09e7dd63a
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
+ while (core_i < num_cores) {
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+ pci_iounmap(pdev, bar0_base);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok)
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
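+
+/* Resulting address map, for illustration: each CTU CAN FD core occupies a
+ * 0x4000-byte window in BAR1 (core 0 at bar1_base, core 1 at
+ * bar1_base + 0x4000, ...), while BAR0 carries the identification word and
+ * the Avalon-MM CRA block whose A2P interrupt enable register is toggled
+ * above.
+ */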
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function unregisters the CAN devices and frees all the resources
+ * allocated to them.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ dev_dbg(&pdev->dev, "ctucan_remove");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi)
+ pci_disable_msi(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..70e2577c8541
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err;
+ }
+
+ /* The number of TX buffers might change in future HW revisions. If so,
+ * it will be passed as a property via the device tree.
+ */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ */
+static void ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "ctucan_remove");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 3e2e207861fc..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += bittiming.o
-can-dev-y += dev.o
-can-dev-y += length.o
-can-dev-y += netlink.o
-can-dev-y += rx-offload.o
-can-dev-y += skb.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-$(CONFIG_CAN_LEDS) += led.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0509625c3082..8f82418230ce 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -2,225 +2,86 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
+void can_sjw_set_default(struct can_bittiming *bt)
{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
+ if (bt->sjw)
+ return;
- return best_sample_point;
+ /* If user space provides no SJW, default to min(phase_seg1, phase_seg2 / 2), but at least 1 */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
}
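+
+/* Example: with phase_seg1 = 7 and phase_seg2 = 4 the default becomes
+ * max(1, min(7, 4 / 2)) = 2 time quanta; with phase_seg2 = 1 the division
+ * rounds down to 0 and the clamp keeps sjw at the minimum of 1.
+ */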
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * CAN_KBPS)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * CAN_KBPS)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
}
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
}
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
}
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
return 0;
}
-void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
- const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
-
-{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
- return;
-
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
-
- /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
- * delay compensation" (TDC) is only applicable if data BRP is
- * one or two.
- */
- if (dbt->brp == 1 || dbt->brp == 2) {
- /* Sample point in clock periods */
- u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- if (sample_point_in_tc < tdc_const->tdco_min)
- return;
- tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
- }
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int tseg1, alltseg;
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
+ const struct can_priv *priv = netdev_priv(dev);
u64 brp64;
+ int err;
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
@@ -231,57 +92,125 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
return -EINVAL;
+ }
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
return 0;
}
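+
+/* Worked example for the fixup above: an 80 MHz clock and tq = 50 ns give
+ * brp = 4; with prop_seg + phase_seg1 = 15 and phase_seg2 = 4 the bit time
+ * is 1 + 15 + 4 = 20 tq, so bitrate = 80 MHz / (4 * 20) = 1 Mbit/s and the
+ * sample point is (1 + 15) * 1000 / 20 = 800, i.e. 80.0%.
+ */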
/* Checks the validity of predefined bitrate settings */
static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
- break;
+ return 0;
}
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
+ bt->bitrate);
- return 0;
+ return -EINVAL;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- int err;
-
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
+}
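A quick sketch of the three mutually exclusive input shapes this dispatcher accepts (illustrative values; struct can_bittiming comes from the uapi header):

	#include <linux/can/netlink.h>	/* struct can_bittiming */

	/* bitrate only (tq == 0): timing is calculated from scratch */
	static const struct can_bittiming by_bitrate = { .bitrate = 500000 };

	/* tq plus segments (bitrate == 0): caller timing is checked/fixed up */
	static const struct can_bittiming by_timing = {
		.tq = 125, .prop_seg = 6, .phase_seg1 = 7,
		.phase_seg2 = 2, .sjw = 1,
	};

	/* bitrate only against a fixed table: validated, never calculated */
	static const struct can_bittiming by_table = { .bitrate = 250000 };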
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack)
+{
+ const struct can_priv *priv = netdev_priv(dev);
+ u32 xl_bit_time_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 nom_bit_time_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ u32 pwms_ns = can_tqmin_to_ns(pwm->pwms, priv->clock.freq);
+ u32 pwml_ns = can_tqmin_to_ns(pwm->pwml, priv->clock.freq);
+
+ if (pwms_ns + pwml_ns > CAN_PWM_NS_MAX) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "The PWM symbol duration: %u ns may not exceed %u ns",
+ pwms_ns + pwml_ns, CAN_PWM_NS_MAX);
+ return -EINVAL;
+ }
- return err;
+ if (pwms_ns < CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u ns shall be at least %u ns",
+ pwms_ns, CAN_PWM_DECODE_NS);
+ return -EINVAL;
+ }
+
+ if (pwm->pwms >= pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin shall be smaller than PWML: %u tqmin",
+ pwm->pwms, pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (pwml_ns - pwms_ns < 2 * CAN_PWM_DECODE_NS) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "At least %u ns shall separate PWMS: %u ns from PMWL: %u ns",
+ 2 * CAN_PWM_DECODE_NS, pwms_ns, pwml_ns);
+ return -EINVAL;
+ }
+
+ if (xl_bit_time_tqmin % (pwm->pwms + pwm->pwml) != 0) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWM duration: %u tqmin does not divide XL's bit time: %u tqmin",
+ pwm->pwms + pwm->pwml, xl_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ if (pwm->pwmo >= pwm->pwms + pwm->pwml) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin can not be greater than PWMS + PWML: %u tqmin",
+ pwm->pwmo, pwm->pwms + pwm->pwml);
+ return -EINVAL;
+ }
+
+ if (nom_bit_time_tqmin % (pwm->pwms + pwm->pwml) != pwm->pwmo) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not assemble nominal bit time: %u tqmin out of PWMS + PMWL and PWMO",
+ nom_bit_time_tqmin);
+ return -EINVAL;
+ }
+
+ return 0;
}
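A user-space sketch of the arithmetic these checks enforce; the tqmin durations are illustrative assumptions, not values from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int xl_bit = 8;	/* XL data bit time in tqmin (assumed) */
		unsigned int nom_bit = 80;	/* nominal bit time in tqmin (assumed) */
		unsigned int pwms = 2, pwml = 6, pwmo;

		/* PWMS + PWML must divide the XL bit time evenly */
		if (xl_bit % (pwms + pwml))
			return 1;

		/* the nominal bit time is a whole number of PWM symbols
		 * plus the offset: nom_bit = n * (pwms + pwml) + pwmo
		 */
		pwmo = nom_bit % (pwms + pwml);
		printf("pwms=%u pwml=%u pwmo=%u\n", pwms, pwml, pwmo);	/* 2 6 0 */
		return 0;
	}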
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..cc4022241553
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-hundredth of a percent */
+
+/* CiA recommended sample points for Non Return to Zero encoding. */
+static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ return 750;
+
+ if (bt->bitrate > 500 * KILO /* BPS */)
+ return 800;
+
+ return 875;
+}
+
+/* Sample points for Pulse-Width Modulation encoding. */
+static int can_calc_sample_point_pwm(const struct can_bittiming *bt)
+{
+ if (bt->bitrate > 15 * MEGA /* BPS */)
+ return 625;
+
+ if (bt->bitrate > 9 * MEGA /* BPS */)
+ return 600;
+
+ if (bt->bitrate > 4 * MEGA /* BPS */)
+ return 560;
+
+ return 520;
+}
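Both helpers are simple step functions from the requested bitrate to a sample point in per-mille of the bit time. A user-space sketch of the NRZ mapping, using the same thresholds as above:

	#include <stdio.h>

	static int sample_point_nrz(unsigned int bitrate)
	{
		if (bitrate > 800000)
			return 750;
		if (bitrate > 500000)
			return 800;
		return 875;
	}

	int main(void)
	{
		const unsigned int rates[] = { 125000, 500000, 1000000 };

		for (int i = 0; i < 3; i++)	/* prints 875, 875 and 750 */
			printf("%u bps -> %d per-mille\n", rates[i],
			       sample_point_nrz(rates[i]));
		return 0;
	}

Note that the comparisons are strict: exactly 500 kbit/s still gets the slower-rate recommendation of 87.5%.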
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_reference, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_reference * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_reference - sample_point);
+
+ if (sample_point <= sample_point_reference &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
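can_update_sample_point() tries the two integer roundings of tseg2 around the reference and keeps the candidate at or below the reference with the smallest error. A worked sketch with the 875 per-mille reference and tseg = 15 (bit time = 16 tq), ignoring the min/max clamping:

	#include <assert.h>

	#define SYNC_SEG 1

	int main(void)
	{
		unsigned int ref = 875, tseg = 15;	/* tseg1 + tseg2 */
		/* i = 0 (round down): tseg2 = 16 - (875 * 16) / 1000 = 2 */
		unsigned int tseg2 = tseg + SYNC_SEG -
			(ref * (tseg + SYNC_SEG)) / 1000;
		unsigned int sp = 1000 * (tseg + SYNC_SEG - tseg2) /
			(tseg + SYNC_SEG);

		assert(tseg2 == 2 && sp == 875);	/* exact hit, error 0 */
		/* i = 1 would give tseg2 = 1, sp = 937 > ref: rejected */
		return 0;
	}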
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* diff between calculated and reference value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* diff between calculated and reference value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_reference; /* reference sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+ int err;
+
+ if (bt->sample_point)
+ sample_point_reference = bt->sample_point;
+ else if (btc == priv->xl.data_bittiming_const &&
+ (priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ sample_point_reference = can_calc_sample_point_pwm(bt);
+ else
+ sample_point_reference = can_calc_sample_point_nrz(bt);
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_reference, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error >= best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-hundredth of a percent */
+ v64 = (u64)best_bitrate_error * 10000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ /* print at least 0.01% if the error is smaller */
+ bitrate_error = max(bitrate_error, 1U);
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%02u%% too high",
+ bitrate_error / 100,
+ bitrate_error % 100);
+ return -EINVAL;
+ }
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%02u%%",
+ bitrate_error / 100, bitrate_error % 100);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_reference,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * can_bit_time(bt));
+
+ return 0;
+}
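A worked numeric sketch of what the search converges to, assuming an 80 MHz clock and a 500 kbit/s target (both values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long freq = 80000000;
		unsigned int bitrate = 500000;
		unsigned int bit_time_tq = 16;	/* sync + tseg1 + tseg2 */
		unsigned int brp = freq / (bit_time_tq * bitrate);	/* 10 */
		unsigned int tq_ns = brp * 1000000000ULL / freq;	/* 125 ns */

		/* with the 875 per-mille reference: tseg2 = 2, tseg1 = 13,
		 * split as prop_seg = 13 / 2 = 6 and phase_seg1 = 7
		 */
		printf("brp=%u tq=%uns real bitrate=%llu\n", brp, tq_ns,
		       freq / (brp * bit_time_tq));	/* exactly 500000 */
		return 0;
	}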
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
+{
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
+ return;
+
+ *ctrlmode &= ~tdc_mask;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= tdc_auto;
+ }
+}
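In auto mode the default TDC offset is simply the data-phase sample point measured in controller clock periods, clamped to the hardware maximum. A sketch with assumed timing and an assumed tdco_max:

	#include <stdio.h>

	int main(void)
	{
		/* assumed data-phase timing and limit */
		unsigned int brp = 2, prop_seg = 6, phase_seg1 = 7, tdco_max = 127;
		unsigned int sp_tc = (1 + prop_seg + phase_seg1) * brp;	/* 28 */
		unsigned int tdco = sp_tc < tdco_max ? sp_tc : tdco_max;

		printf("tdco = %u clock periods\n", tdco);	/* 28 */
		return 0;
	}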
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct can_pwm *pwm = &priv->xl.pwm;
+ u32 xl_tqmin = can_bit_time_tqmin(&priv->xl.data_bittiming);
+ u32 xl_ns = can_tqmin_to_ns(xl_tqmin, priv->clock.freq);
+ u32 nom_tqmin = can_bit_time_tqmin(&priv->bittiming);
+ int pwm_per_bit_max = xl_tqmin / (pwm_const->pwms_min + pwm_const->pwml_min);
+ int pwm_per_bit;
+ u32 pwm_tqmin;
+
+ /* For a data bitrate of 5 Mbit/s or greater, xl_ns < CAN_PWM_NS_MAX,
+ * giving a pwm_per_bit of 1, so the loop breaks immediately.
+ */
+ for (pwm_per_bit = DIV_ROUND_UP(xl_ns, CAN_PWM_NS_MAX);
+ pwm_per_bit <= pwm_per_bit_max; pwm_per_bit++)
+ if (xl_tqmin % pwm_per_bit == 0)
+ break;
+
+ if (pwm_per_bit > pwm_per_bit_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Can not divide the XL data phase's bit time: %u tqmin into multiple PWM symbols",
+ xl_tqmin);
+ return -EINVAL;
+ }
+
+ pwm_tqmin = xl_tqmin / pwm_per_bit;
+ pwm->pwms = DIV_ROUND_UP_POW2(pwm_tqmin, 4);
+ pwm->pwml = pwm_tqmin - pwm->pwms;
+ pwm->pwmo = nom_tqmin % pwm_tqmin;
+
+ return 0;
+}
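The loop picks the smallest number of PWM symbols per XL bit that both respects the maximum symbol duration and divides the bit time evenly. A sketch of that search with assumed numbers (the real CAN_PWM_NS_MAX is defined elsewhere in the series):

	#include <stdio.h>

	int main(void)
	{
		/* assumed: XL bit = 16 tqmin = 200 ns, max symbol = 175 ns */
		unsigned int xl_tqmin = 16, xl_ns = 200, pwm_ns_max = 175;
		unsigned int per_bit = (xl_ns + pwm_ns_max - 1) / pwm_ns_max; /* 2 */

		while (per_bit <= xl_tqmin && xl_tqmin % per_bit)
			per_bit++;	/* first divisor: 2 symbols of 8 tqmin */

		printf("%u PWM symbols of %u tqmin each\n",
		       per_bit, xl_tqmin / per_bit);
		return 0;
	}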
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index e3d840b81357..091f30e94c61 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,25 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
-#include <linux/can/led.h>
#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
-
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+#include <linux/slab.h>
+#include <linux/workqueue.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
@@ -93,11 +85,74 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
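+ /* x & ~(x - 1) isolates the least significant bit that is set, so
+ * only the first (lowest) flag present in ctrlmode is reported
+ */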
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "(none)";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "LOOPBACK";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "LISTEN-ONLY";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "TRIPLE-SAMPLING";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "ONE-SHOT";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "BERR-REPORTING";
+ case CAN_CTRLMODE_FD:
+ return "FD";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "PRESUME-ACK";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "FD-NON-ISO";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "CC-LEN8-DLC";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "TDC-AUTO";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "TDC-MANUAL";
+ case CAN_CTRLMODE_RESTRICTED:
+ return "RESTRICTED";
+ case CAN_CTRLMODE_XL:
+ return "XL";
+ case CAN_CTRLMODE_XL_TDC_AUTO:
+ return "XL-TDC-AUTO";
+ case CAN_CTRLMODE_XL_TDC_MANUAL:
+ return "XL-TDC-MANUAL";
+ case CAN_CTRLMODE_XL_TMS:
+ return "TMS";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
+
+static enum can_state can_state_err_to_state(u16 err)
+{
+ if (err < CAN_ERROR_WARNING_THRESHOLD)
+ return CAN_STATE_ERROR_ACTIVE;
+ if (err < CAN_ERROR_PASSIVE_THRESHOLD)
+ return CAN_STATE_ERROR_WARNING;
+ if (err < CAN_BUS_OFF_THRESHOLD)
+ return CAN_STATE_ERROR_PASSIVE;
+
+ return CAN_STATE_BUS_OFF;
+}
+
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state)
+{
+ *tx_state = can_state_err_to_state(bec->txerr);
+ *rx_state = can_state_err_to_state(bec->rxerr);
+}
+EXPORT_SYMBOL_GPL(can_state_get_by_berr_counter);
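The thresholds are the classic ISO 11898-1 ones from linux/can/dev.h: warning at 96, passive at 128, bus-off at 256 errors. A user-space sketch of the mapping:

	#include <stdio.h>

	static const char *state(unsigned int err)
	{
		if (err < 96)
			return "error-active";
		if (err < 128)
			return "error-warning";
		if (err < 256)
			return "error-passive";
		return "bus-off";
	}

	int main(void)
	{
		/* txerr = 140, rxerr = 30 -> tx passive, rx active */
		printf("tx: %s, rx: %s\n", state(140), state(30));
		return 0;
	}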
+
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state)
{
@@ -133,15 +188,18 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *cf;
int err;
- BUG_ON(netif_carrier_ok(dev));
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
+ if (netif_carrier_ok(dev))
+ netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
/* No synchronization needed because the device is bus-off and
* no messages can come in or go out.
@@ -150,26 +208,25 @@ static void can_restart(struct net_device *dev)
/* send restart message upstream */
skb = alloc_can_err_skb(dev, &cf);
- if (!skb)
- goto restart;
-
- cf->can_id |= CAN_ERR_RESTARTED;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
- netif_rx_ni(skb);
-
-restart:
- netdev_dbg(dev, "restarted\n");
- priv->can_stats.restarts++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_rx(skb);
+ }
/* Now restart the device */
+ netif_carrier_on(dev);
err = priv->do_set_mode(dev, CAN_MODE_START);
+ if (err) {
+ netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
+ netif_carrier_off(dev);
+
+ return err;
+ } else {
+ netdev_dbg(dev, "Restarted\n");
+ priv->can_stats.restarts++;
+ }
- netif_carrier_on(dev);
- if (err)
- netdev_err(dev, "Error %d during restart", err);
+ return 0;
}
static void can_restart_work(struct work_struct *work)
@@ -194,9 +251,8 @@ int can_restart_now(struct net_device *dev)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
- return 0;
+ return can_restart(dev);
}
/* CAN bus-off
@@ -227,6 +283,8 @@ void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
@@ -296,42 +354,92 @@ void free_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(free_candev);
-/* changing MTU and control mode for CAN/CANFD devices */
-int can_change_mtu(struct net_device *dev, int new_mtu)
+void can_set_default_mtu(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- /* Do not allow changing the MTU while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
+ if (priv->ctrlmode & CAN_CTRLMODE_XL) {
+ if (can_is_canxl_dev_mtu(dev->mtu))
+ return;
+ dev->mtu = CANXL_MTU;
+ dev->min_mtu = CANXL_MIN_MTU;
+ dev->max_mtu = CANXL_MAX_MTU;
+ } else if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
- /* allow change of MTU according to the CANFD ability of the device */
- switch (new_mtu) {
- case CAN_MTU:
- /* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
- return -EINVAL;
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
- priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- break;
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
- case CANFD_MTU:
- /* check for potential CANFD ability */
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- return -EINVAL;
+ /* override the MTU that was set by default in can_setup(), if needed */
+ can_set_default_mtu(dev);
- priv->ctrlmode |= CAN_CTRLMODE_FD;
- break;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
- default:
- return -EINVAL;
- }
+/* generic implementation of netdev_ops::ndo_hwtstamp_get for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ cfg->tx_type = HWTSTAMP_TX_ON;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
- dev->mtu = new_mtu;
return 0;
}
-EXPORT_SYMBOL_GPL(can_change_mtu);
+EXPORT_SYMBOL(can_hwtstamp_get);
+
+/* generic implementation of netdev_ops::ndo_hwtstamp_set for CAN devices
+ * supporting hardware timestamps
+ */
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (cfg->tx_type == HWTSTAMP_TX_ON &&
+ cfg->rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ NL_SET_ERR_MSG_MOD(extack, "Only TX on and RX all packets filter supported");
+ return -ERANGE;
+}
+EXPORT_SYMBOL(can_hwtstamp_set);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
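A sketch of how a driver with hardware RX timestamping might wire these generic helpers up; the foo_* names and the surrounding ops are hypothetical, only the three can_* helpers come from this patch:

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_open		= foo_open,		/* hypothetical */
		.ndo_stop		= foo_stop,		/* hypothetical */
		.ndo_start_xmit		= foo_start_xmit,	/* hypothetical */
		.ndo_hwtstamp_get	= can_hwtstamp_get,
		.ndo_hwtstamp_set	= can_hwtstamp_set,
	};

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_ts_info = can_ethtool_op_get_ts_info_hwts,
	};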
/* Common open function when the device gets opened.
*
@@ -349,8 +457,8 @@ int open_candev(struct net_device *dev)
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
@@ -411,7 +519,7 @@ static int can_set_termination(struct net_device *ndev, u16 term)
else
set = 0;
- gpiod_set_value(priv->termination_gpio, set);
+ gpiod_set_value_cansleep(priv->termination_gpio, set);
return 0;
}
@@ -459,6 +567,18 @@ static int can_get_termination(struct net_device *ndev)
return 0;
}
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
@@ -476,7 +596,16 @@ int register_candev(struct net_device *dev)
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
+ return -EINVAL;
+
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
@@ -515,11 +644,9 @@ static __init int can_dev_init(void)
{
int err;
- can_led_notifier_init();
-
err = can_netlink_register();
if (!err)
- pr_info(MOD_DESC "\n");
+ pr_info("CAN device driver interface\n");
return err;
}
@@ -528,8 +655,6 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
can_netlink_unregister();
-
- can_led_notifier_exit();
}
module_exit(can_dev_exit);
diff --git a/drivers/net/can/dev/length.c b/drivers/net/can/dev/length.c
index b48140b1102e..b7f4d76dd444 100644
--- a/drivers/net/can/dev/length.c
+++ b/drivers/net/can/dev/length.c
@@ -78,18 +78,7 @@ unsigned int can_skb_get_frame_len(const struct sk_buff *skb)
else
len = cf->len;
- if (can_is_canfd_skb(skb)) {
- if (cf->can_id & CAN_EFF_FLAG)
- len += CANFD_FRAME_OVERHEAD_EFF;
- else
- len += CANFD_FRAME_OVERHEAD_SFF;
- } else {
- if (cf->can_id & CAN_EFF_FLAG)
- len += CAN_FRAME_OVERHEAD_EFF;
- else
- len += CAN_FRAME_OVERHEAD_SFF;
- }
-
- return len;
+ return can_frame_bytes(can_is_canfd_skb(skb), cf->can_id & CAN_EFF_FLAG,
+ false, len);
}
EXPORT_SYMBOL_GPL(can_skb_get_frame_len);
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 95cca4e5251f..d6b0e686fb11 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,7 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (C) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/can/dev.h>
@@ -18,9 +18,14 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
+ [IFLA_CAN_XL_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_XL_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_XL_PWM] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -35,85 +40,360 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static const struct nla_policy can_pwm_policy[IFLA_CAN_PWM_MAX + 1] = {
+ [IFLA_CAN_PWM_PWMS_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMS] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWML] = { .type = NLA_U32 },
+ [IFLA_CAN_PWM_PWMO] = { .type = NLA_U32 },
+};
+
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
+{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
+{
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
+ int err;
+
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ /* If one of the CAN_CTRLMODE_{,XL}_TDC_* flags is set then TDC
+ * must be set and vice-versa
+ */
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
+
+ /* If providing TDC parameters, at least TDCO is needed. TDCV is
+ * needed if and only if CAN_CTRLMODE_{,XL}_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate_pwm(struct nlattr *data[],
+ struct netlink_ext_ack *extack, u32 flags)
+{
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ int err;
+
+ if (!data[IFLA_CAN_XL_PWM])
+ return 0;
+
+ if (!(flags & CAN_CTRLMODE_XL_TMS)) {
+ NL_SET_ERR_MSG(extack, "PWM requires TMS");
+ return -EOPNOTSUPP;
+ }
+
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, data[IFLA_CAN_XL_PWM],
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb_pwm[IFLA_CAN_PWM_PWMS] != !tb_pwm[IFLA_CAN_PWM_PWML]) {
+ NL_SET_ERR_MSG(extack,
+ "Provide either both PWMS and PWML, or none for automatic calculation");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO] &&
+ (!tb_pwm[IFLA_CAN_PWM_PWMS] || !tb_pwm[IFLA_CAN_PWM_PWML])) {
+ NL_SET_ERR_MSG(extack, "PWMO requires both PWMS and PWML");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
{
- bool is_can_fd = false;
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
+ int err;
- /* Make sure that valid CAN FD configurations always consist of
+ /* Make sure that valid CAN FD/XL configurations always consist of
* - nominal/arbitration bittiming
* - data bittiming
- * - control mode with CAN_CTRLMODE_FD set
- * - TDC parameters are coherent (details below)
+ * - control mode with CAN_CTRLMODE_{FD,XL} set
+ * - TDC parameters are coherent (details in can_validate_tdc())
*/
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_XL_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_XL;
+ type = "XL";
+ }
+
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
+
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_validate_xl_flags(struct netlink_ext_ack *extack,
+ u32 masked_flags, u32 mask)
+{
+ if (masked_flags & CAN_CTRLMODE_XL) {
+ if (masked_flags & CAN_CTRLMODE_XL_TMS) {
+ const u32 tms_conflicts_mask = CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_XL_TDC_MASK;
+ u32 tms_conflicts = masked_flags & tms_conflicts_mask;
+
+ if (tms_conflicts) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "TMS and %s are mutually exclusive",
+ can_get_ctrlmode_str(tms_conflicts));
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (mask & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack, "TMS requires CAN XL");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
if (!data)
return 0;
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ flags = cm->flags & cm->mask;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
- return -EOPNOTSUPP;
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then
- * TDC must be set and vice-versa
- */
- if (!!tdc_flags != !!data[IFLA_CAN_TDC])
+ if ((flags & CAN_CTRLMODE_LISTENONLY) &&
+ (flags & CAN_CTRLMODE_RESTRICTED)) {
+ NL_SET_ERR_MSG(extack,
+ "LISTEN-ONLY and RESTRICTED modes are mutually exclusive");
return -EOPNOTSUPP;
- /* If providing TDC parameters, at least TDCO is
- * needed. TDCV is needed if and only if
- * CAN_CTRLMODE_TDC_MANUAL is set
- */
- if (data[IFLA_CAN_TDC]) {
- struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- int err;
+ }
- err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
- data[IFLA_CAN_TDC],
- can_tdc_policy, extack);
- if (err)
- return err;
+ err = can_validate_xl_flags(extack, flags, cm->mask);
+ if (err)
+ return err;
+ }
- if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
- if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
- return -EOPNOTSUPP;
- } else {
- if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
- return -EOPNOTSUPP;
- }
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
- if (!tb_tdc[IFLA_CAN_TDC_TDCO])
- return -EOPNOTSUPP;
- }
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_XL_DATA_BITTIMING, flags);
+ if (err)
+ return err;
+
+ err = can_validate_pwm(data, extack, flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, deactivated, notsupp, ctrlstatic_missing;
+
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
+
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ deactivated = ~cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
}
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ /* make sure static options are provided by the configuration */
+ ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+ if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
- if (!is_can_fd)
+ /* If FD was active and is not turned off, check for XL conflicts */
+ if (priv->ctrlmode & CAN_CTRLMODE_FD & ~deactivated) {
+ if (maskedflags & CAN_CTRLMODE_XL_TMS) {
+ NL_SET_ERR_MSG(extack,
+ "TMS can not be activated while CAN FD is on");
return -EOPNOTSUPP;
+ }
}
+ /* If a top dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ if (cm->mask & CAN_CTRLMODE_XL)
+ priv->ctrlmode &= ~(CAN_CTRLMODE_XL_TDC_MASK |
+ CAN_CTRLMODE_XL_TMS);
+
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD/XL config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
+ }
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL)) {
+ memset(&priv->xl.data_bittiming, 0,
+ sizeof(priv->xl.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_XL_TDC_MASK;
+ memset(&priv->xl.tdc, 0, sizeof(priv->xl.tdc));
+ memset(&priv->xl.pwm, 0, sizeof(priv->xl.pwm));
+ }
+
+ can_set_default_mtu(dev);
+
return 0;
}
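cm->mask selects which bits a request may change and cm->flags carries their new values; a top-level flag in the mask additionally drops its dependants. A user-space sketch with the uapi flag values (CAN_CTRLMODE_FD = 0x20, CAN_CTRLMODE_TDC_AUTO = 0x200):

	#include <stdio.h>

	#define FD	0x20	/* CAN_CTRLMODE_FD */
	#define TDC_A	0x200	/* CAN_CTRLMODE_TDC_AUTO */

	int main(void)
	{
		unsigned int ctrlmode = FD | TDC_A;
		unsigned int mask = FD, flags = 0;	/* request: FD off */

		if (mask & FD)		/* top flag touched: drop dependants */
			ctrlmode &= ~TDC_A;
		ctrlmode = (ctrlmode & ~mask) | (flags & mask);
		printf("ctrlmode = 0x%x\n", ctrlmode);	/* 0x0 */
		return 0;
	}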
-static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
int err;
- if (!tdc_const || !can_tdc_is_enabled(priv))
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
return -EOPNOTSUPP;
+ }
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
@@ -147,8 +427,166 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->tdc = tdc;
+ dbt_params->tdc = tdc;
+
+ return 0;
+}
+
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
+{
+ struct nlattr *data_bittiming, *data_tdc;
+ struct can_priv *priv = netdev_priv(dev);
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
+ int err;
+
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+ data_bittiming = data[IFLA_CAN_XL_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_XL_TDC];
+ dbt_params = &priv->xl;
+ tdc_mask = CAN_CTRLMODE_XL_TDC_MASK;
+ }
+
+ if (!data_bittiming)
+ return 0;
+
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass the bitrate directly via
+ * do_set_data_bittiming(). Bail out if none is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
+
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
+
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
+
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ if (fd || !(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
+ }
+ } else if (need_tdc_calc) {
+ /* Neither of TDC parameters nor TDC flags are provided:
+ * do calculation
+ */
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_{,XL}_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
+
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
+
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int can_pwm_changelink(struct net_device *dev,
+ const struct nlattr *pwm_nla,
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ struct nlattr *tb_pwm[IFLA_CAN_PWM_MAX + 1];
+ struct can_pwm pwm = { 0 };
+ int err;
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_XL_TMS))
+ return 0;
+
+ if (!pwm_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support PWM");
+ return -EOPNOTSUPP;
+ }
+
+ if (!pwm_nla)
+ return can_calc_pwm(dev, extack);
+ err = nla_parse_nested(tb_pwm, IFLA_CAN_PWM_MAX, pwm_nla,
+ can_pwm_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMS]) {
+ pwm.pwms = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMS]);
+ if (pwm.pwms < pwm_const->pwms_min ||
+ pwm.pwms > pwm_const->pwms_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMS: %u tqmin is out of range: %u...%u",
+ pwm.pwms, pwm_const->pwms_min,
+ pwm_const->pwms_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWML]) {
+ pwm.pwml = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWML]);
+ if (pwm.pwml < pwm_const->pwml_min ||
+ pwm.pwml > pwm_const->pwml_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWML: %u tqmin is out of range: %u...%u",
+ pwm.pwml, pwm_const->pwml_min,
+ pwm_const->pwml_max);
+ return -EINVAL;
+ }
+ }
+
+ if (tb_pwm[IFLA_CAN_PWM_PWMO]) {
+ pwm.pwmo = nla_get_u32(tb_pwm[IFLA_CAN_PWM_PWMO]);
+ if (pwm.pwmo < pwm_const->pwmo_min ||
+ pwm.pwmo > pwm_const->pwmo_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PWMO: %u tqmin is out of range: %u...%u",
+ pwm.pwmo, pwm_const->pwmo_min,
+ pwm_const->pwmo_max);
+ return -EINVAL;
+ }
+ }
+
+ err = can_validate_pwm_bittiming(dev, &pwm, extack);
+ if (err)
+ return err;
+
+ priv->xl.pwm = pwm;
return 0;
}
@@ -157,12 +595,13 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
- u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
ASSERT_RTNL();
+ err = can_ctrlmode_changelink(dev, data, extack);
+ if (err)
+ return err;
+
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -175,20 +614,23 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->bittiming_const && !priv->do_set_bittiming)
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
- priv->bitrate_const_cnt);
+ priv->bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -202,61 +644,28 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
-
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
- maskedflags = cm->flags & cm->mask;
-
- /* check whether provided bits are allowed to be passed */
- if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
-
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ if (data[IFLA_CAN_RESTART_MS]) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
+ if (restart_ms != 0 && !priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
-
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
-
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
- dev->mtu = CANFD_MTU;
- } else {
- dev->mtu = CAN_MTU;
- memset(&priv->data_bittiming, 0,
- sizeof(priv->data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
}
- tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
- * exclusive: make sure to turn the other one off
- */
- if (tdc_mask)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
- }
-
- if (data[IFLA_CAN_RESTART_MS]) {
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
@@ -265,72 +674,29 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
- return -EINVAL;
- }
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
- if (data[IFLA_CAN_TDC]) {
- /* TDC parameters are provided: use them */
- err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
- extack);
- if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- return err;
- }
- } else if (!tdc_mask) {
- /* Neither of TDC parameters nor TDC flags are
- * provided: do calculation
- */
- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
- &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
- * turned off. TDC is disabled: do nothing
- */
-
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
+ /* CAN XL */
+ err = can_dbt_changelink(dev, data, false, extack);
+ if (err)
+ return err;
+ err = can_pwm_changelink(dev, data[IFLA_CAN_XL_PWM], extack);
+ if (err)
+ return err;
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
- if (!priv->do_set_termination)
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
return -EOPNOTSUPP;
+ }
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
@@ -351,38 +717,85 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
-static size_t can_tdc_get_size(const struct net_device *dev)
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
{
- struct can_priv *priv = netdev_priv(dev);
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
size_t size;
- if (!priv->tdc_const)
+ if (!dbt_params->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->tdc_const->tdcf_max) {
+ if (dbt_params->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_tdc_is_enabled(priv)) {
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->do_get_auto_tdcv)
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->tdc_const->tdcf_max)
+ if (dbt_params->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_{,XL}_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_{,XL}_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_{,XL}_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_{,XL}_TDC */
+
+ return size;
+}
+
+static size_t can_ctrlmode_ext_get_size(void)
+{
+ return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
+ nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
+}
+
+static size_t can_pwm_get_size(const struct can_pwm_const *pwm_const,
+ bool pwm_on)
+{
+ size_t size;
+
+ if (!pwm_const || !pwm_on)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_PWM */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML_MAX */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO_MAX */
+
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMS */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWML */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_PWM_PWMO */
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -398,10 +811,6 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
@@ -410,30 +819,76 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
- size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
+ size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
+ size += can_data_bittiming_get_size(&priv->xl,
+ priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+ size += can_pwm_get_size(priv->xl.pwm_const, /* IFLA_CAN_XL_PWM */
+ priv->ctrlmode & CAN_CTRLMODE_XL_TMS);
return size;
}
-static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
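+/* Unlike nla_put(), these fill helpers return non-zero (true) when the
+ * attribute could not be added, so their results can be chained with ||
+ * in can_fill_info() below
+ */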
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
{
- struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+ dbt_params = &priv->xl;
+ tdc_is_enabled = can_xl_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MANUAL;
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
if (!tdc_const)
return 0;
- nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ nest = nla_nest_start(skb, ifla_can_tdc);
if (!nest)
return -EMSGSIZE;
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ if (tdc_manual &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
@@ -445,15 +900,15 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_tdc_is_enabled(priv)) {
+ if (tdc_is_enabled) {
u32 tdcv;
int err = -EINVAL;
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->do_get_auto_tdcv) {
- err = priv->do_get_auto_tdcv(dev, &tdcv);
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -472,6 +927,61 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_pwm_fill_info(struct sk_buff *skb, const struct can_priv *priv)
+{
+ const struct can_pwm_const *pwm_const = priv->xl.pwm_const;
+ const struct can_pwm *pwm = &priv->xl.pwm;
+ struct nlattr *nest;
+
+ if (!pwm_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_XL_PWM);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MIN, pwm_const->pwms_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMS_MAX, pwm_const->pwms_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MIN, pwm_const->pwml_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML_MAX, pwm_const->pwml_max) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MIN, pwm_const->pwmo_min) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO_MAX, pwm_const->pwmo_max))
+ goto err_cancel;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ if (nla_put_u32(skb, IFLA_CAN_PWM_PWMS, pwm->pwms) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWML, pwm->pwml) ||
+ nla_put_u32(skb, IFLA_CAN_PWM_PWMO, pwm->pwmo))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
+ const struct can_priv *priv)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
+ priv->ctrlmode_supported)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -482,13 +992,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
@@ -499,14 +1007,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
- (priv->data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -515,25 +1020,36 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->termination_const_cnt,
priv->termination_const))) ||
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
- (priv->data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- (can_tdc_fill_info(skb, dev))
- )
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
+
+ can_ctrlmode_ext_fill_info(skb, priv) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING,
+ &priv->xl.data_bittiming) ||
+
+ can_bittiming_const_fill_info(skb, IFLA_CAN_XL_DATA_BITTIMING_CONST,
+ priv->xl.data_bittiming_const) ||
+
+ can_bitrate_const_fill_info(skb, IFLA_CAN_XL_DATA_BITRATE_CONST,
+ priv->xl.data_bitrate_const,
+ priv->xl.data_bitrate_const_cnt) ||
+
+ can_tdc_fill_info(skb, dev, IFLA_CAN_XL_TDC) ||
+
+ can_pwm_fill_info(skb, priv)
+ )
return -EMSGSIZE;
return 0;
@@ -557,8 +1073,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 37b0cc65237b..46e7b6db4a1e 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
* David Jander
- * Copyright (C) 2014-2021 Pengutronix,
+ * Copyright (C) 2014-2021, 2023 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
*/
@@ -54,8 +54,11 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
struct can_frame *cf = (struct can_frame *)skb->data;
work_done++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
netif_receive_skb(skb);
}
@@ -64,11 +67,9 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
/* Check if there was another interrupt */
if (!skb_queue_empty(&offload->skb_queue))
- napi_reschedule(&offload->napi);
+ napi_schedule(&offload->napi);
}
- can_led_event(offload->dev, CAN_LED_EVENT_RX);
-
return work_done;
}
@@ -218,8 +219,8 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp)
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -237,23 +238,24 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp,
- unsigned int *frame_len_ptr)
+unsigned int
+can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr)
{
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
@@ -261,7 +263,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
return len;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb)
@@ -278,6 +280,31 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
+unsigned int
+can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device *dev = offload->dev;
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
+ if (!skb)
+ return 0;
+
+ err = can_rx_offload_queue_tail(offload, skb);
+ if (err) {
+ stats->rx_errors++;
+ stats->tx_fifo_errors++;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);
+
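
For illustration, a hypothetical TX-complete handler (not part of this patch)
using the renamed helper; the hardware timestamp hw_ts is assumed to come from
the driver's IRQ path:

/* Sketch only: account a completed TX frame and echo it back, sorted by
 * its hardware timestamp.
 */
static void sketch_tx_done(struct can_rx_offload *offload, unsigned int idx,
			   u32 hw_ts)
{
	struct net_device_stats *stats = &offload->dev->stats;

	stats->tx_packets++;
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb_queue_timestamp(offload, idx,
							    hw_ts, NULL);
}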
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
unsigned long flags;
@@ -328,13 +355,14 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
__skb_queue_head_init(&offload->skb_irq_queue);
- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+ netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
+ weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
__func__, offload->skb_queue_len_max);
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 61660248c69e..3ebd4f779b9b 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,6 +5,13 @@
*/
#include <linux/can/dev.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
/* Local echo of CAN messages
*
@@ -42,12 +49,17 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
{
struct can_priv *priv = netdev_priv(dev);
- BUG_ON(idx >= priv->echo_skb_max);
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return -EINVAL;
+ }
/* check flag whether this packet has to be looped back */
if (!(dev->flags & IFF_ECHO) ||
(skb->protocol != htons(ETH_P_CAN) &&
- skb->protocol != htons(ETH_P_CANFD))) {
+ skb->protocol != htons(ETH_P_CANFD) &&
+ skb->protocol != htons(ETH_P_CANXL))) {
kfree_skb(skb);
return 0;
}
@@ -64,6 +76,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
@@ -80,8 +95,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -97,13 +112,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -133,7 +147,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -177,6 +191,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -190,16 +218,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -221,23 +241,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
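
For illustration, a hypothetical RX path (not part of this patch) using the
new allocator; hw_len is assumed to be the data length reported by the
hardware and the actual register access is elided:

/* Sketch only: receive one CAN XL frame. alloc_canxl_skb() already sets
 * CANXL_XLF in cxl->flags and fills cxl->len with data_len.
 */
static void sketch_rx_canxl(struct net_device *dev, unsigned int hw_len)
{
	struct canxl_frame *cxl;
	struct sk_buff *skb;

	skb = alloc_canxl_skb(dev, &cxl, hw_len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	/* copy cxl->len bytes of payload into cxl->data here */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cxl->len;
	netif_rx(skb);
}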
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -252,3 +300,75 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
+ }
+
+ return true;
+}
+
+/* Drop a given socket buffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
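
For illustration, the canonical call site (hypothetical, not part of this
patch): drivers invoke the check, or the can_dev_dropped_skb() wrapper that
additionally rejects transmission in listen-only mode, first thing in their
ndo_start_xmit:

/* Sketch only: drop invalid frames before touching the hardware. */
static netdev_tx_t sketch_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and counted */

	/* hand the frame to the hardware here */

	return NETDEV_TX_OK;
}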
diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c
new file mode 100644
index 000000000000..41953655e3d3
--- /dev/null
+++ b/drivers/net/can/dummy_can.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2025 Vincent Mailhol <mailhol@kernel.org> */
+
+#include <linux/array_size.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/string_choices.h>
+#include <linux/units.h>
+
+#include <linux/can.h>
+#include <linux/can/bittiming.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+struct dummy_can {
+ struct can_priv can;
+ struct net_device *dev;
+};
+
+static struct dummy_can *dummy_can;
+
+static const struct can_bittiming_const dummy_can_bittiming_const = {
+ .name = "dummy_can CC",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_bittiming_const dummy_can_fd_databittiming_const = {
+ .name = "dummy_can FD",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_fd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_bittiming_const dummy_can_xl_databittiming_const = {
+ .name = "dummy_can XL",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1
+};
+
+static const struct can_tdc_const dummy_can_xl_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 127,
+ .tdcf_min = 0,
+ .tdcf_max = 127
+};
+
+static const struct can_pwm_const dummy_can_pwm_const = {
+ .pwms_min = 1,
+ .pwms_max = 8,
+ .pwml_min = 2,
+ .pwml_max = 24,
+ .pwmo_min = 0,
+ .pwmo_max = 16,
+};
+
+static void dummy_can_print_bittiming(struct net_device *dev,
+ struct can_bittiming *bt)
+{
+ netdev_dbg(dev, "\tbitrate: %u\n", bt->bitrate);
+ netdev_dbg(dev, "\tsample_point: %u\n", bt->sample_point);
+ netdev_dbg(dev, "\ttq: %u\n", bt->tq);
+ netdev_dbg(dev, "\tprop_seg: %u\n", bt->prop_seg);
+ netdev_dbg(dev, "\tphase_seg1: %u\n", bt->phase_seg1);
+ netdev_dbg(dev, "\tphase_seg2: %u\n", bt->phase_seg2);
+ netdev_dbg(dev, "\tsjw: %u\n", bt->sjw);
+ netdev_dbg(dev, "\tbrp: %u\n", bt->brp);
+}
+
+static void dummy_can_print_tdc(struct net_device *dev, struct can_tdc *tdc)
+{
+ netdev_dbg(dev, "\t\ttdcv: %u\n", tdc->tdcv);
+ netdev_dbg(dev, "\t\ttdco: %u\n", tdc->tdco);
+ netdev_dbg(dev, "\t\ttdcf: %u\n", tdc->tdcf);
+}
+
+static void dummy_can_print_pwm(struct net_device *dev, struct can_pwm *pwm,
+ struct can_bittiming *dbt)
+{
+ netdev_dbg(dev, "\t\tpwms: %u\n", pwm->pwms);
+ netdev_dbg(dev, "\t\tpwml: %u\n", pwm->pwml);
+ netdev_dbg(dev, "\t\tpwmo: %u\n", pwm->pwmo);
+}
+
+static void dummy_can_print_ctrlmode(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+ unsigned long supported = can_priv->ctrlmode_supported;
+ u32 enabled = can_priv->ctrlmode;
+
+ netdev_dbg(dev, "Control modes:\n");
+ netdev_dbg(dev, "\tsupported: 0x%08x\n", (u32)supported);
+ netdev_dbg(dev, "\tenabled: 0x%08x\n", enabled);
+
+ if (supported) {
+ int idx;
+
+		netdev_dbg(dev, "\tlist:\n");
+ for_each_set_bit(idx, &supported, BITS_PER_TYPE(u32))
+ netdev_dbg(dev, "\t\t%s: %s\n",
+ can_get_ctrlmode_str(BIT(idx)),
+ enabled & BIT(idx) ? "on" : "off");
+ }
+}
+
+static void dummy_can_print_bittiming_info(struct net_device *dev)
+{
+ struct dummy_can *priv = netdev_priv(dev);
+ struct can_priv *can_priv = &priv->can;
+
+ netdev_dbg(dev, "Clock frequency: %u\n", can_priv->clock.freq);
+ netdev_dbg(dev, "Maximum bitrate: %u\n", can_priv->bitrate_max);
+ netdev_dbg(dev, "MTU: %u\n", dev->mtu);
+ netdev_dbg(dev, "\n");
+
+ dummy_can_print_ctrlmode(dev);
+ netdev_dbg(dev, "\n");
+
+ netdev_dbg(dev, "Classical CAN nominal bittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->bittiming);
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_FD) {
+ netdev_dbg(dev, "CAN FD databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->fd.data_bittiming);
+ if (can_fd_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN FD TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->fd.tdc);
+ }
+ }
+ netdev_dbg(dev, "\n");
+
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL) {
+ netdev_dbg(dev, "CAN XL databittiming:\n");
+ dummy_can_print_bittiming(dev, &can_priv->xl.data_bittiming);
+ if (can_xl_tdc_is_enabled(can_priv)) {
+ netdev_dbg(dev, "\tCAN XL TDC:\n");
+ dummy_can_print_tdc(dev, &can_priv->xl.tdc);
+ }
+ if (can_priv->ctrlmode & CAN_CTRLMODE_XL_TMS) {
+ netdev_dbg(dev, "\tCAN XL PWM:\n");
+ dummy_can_print_pwm(dev, &can_priv->xl.pwm,
+ &can_priv->xl.data_bittiming);
+ }
+ }
+ netdev_dbg(dev, "\n");
+}
+
+static int dummy_can_netdev_open(struct net_device *dev)
+{
+	struct can_priv *priv = netdev_priv(dev);
+	int ret;
+
+ dummy_can_print_bittiming_info(dev);
+ netdev_dbg(dev, "error-signalling is %s\n",
+ str_enabled_disabled(!can_dev_in_xl_only_mode(priv)));
+
+ ret = open_candev(dev);
+ if (ret)
+ return ret;
+ netif_start_queue(dev);
+ netdev_dbg(dev, "dummy-can is up\n");
+
+ return 0;
+}
+
+static int dummy_can_netdev_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ close_candev(dev);
+ netdev_dbg(dev, "dummy-can is down\n");
+
+ return 0;
+}
+
+static netdev_tx_t dummy_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ can_put_echo_skb(skb, dev, 0, 0);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += can_get_echo_skb(dev, 0, NULL);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops dummy_can_netdev_ops = {
+ .ndo_open = dummy_can_netdev_open,
+ .ndo_stop = dummy_can_netdev_close,
+ .ndo_start_xmit = dummy_can_start_xmit,
+};
+
+static const struct ethtool_ops dummy_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int __init dummy_can_init(void)
+{
+ struct net_device *dev;
+ struct dummy_can *priv;
+ int ret;
+
+ dev = alloc_candev(sizeof(*priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->netdev_ops = &dummy_can_netdev_ops;
+ dev->ethtool_ops = &dummy_can_ethtool_ops;
+ priv = netdev_priv(dev);
+ priv->can.bittiming_const = &dummy_can_bittiming_const;
+ priv->can.bitrate_max = 20 * MEGA /* BPS */;
+ priv->can.clock.freq = 160 * MEGA /* Hz */;
+ priv->can.fd.data_bittiming_const = &dummy_can_fd_databittiming_const;
+ priv->can.fd.tdc_const = &dummy_can_fd_tdc_const;
+ priv->can.xl.data_bittiming_const = &dummy_can_xl_databittiming_const;
+ priv->can.xl.tdc_const = &dummy_can_xl_tdc_const;
+ priv->can.xl.pwm_const = &dummy_can_pwm_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_RESTRICTED | CAN_CTRLMODE_XL |
+ CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TMS;
+ priv->dev = dev;
+
+ ret = register_candev(priv->dev);
+ if (ret) {
+ free_candev(priv->dev);
+ return ret;
+ }
+
+ dummy_can = priv;
+ netdev_dbg(dev, "dummy-can ready\n");
+
+ return 0;
+}
+
+static void __exit dummy_can_exit(void)
+{
+ struct net_device *dev = dummy_can->dev;
+
+ netdev_dbg(dev, "dummy-can bye bye\n");
+ unregister_candev(dev);
+ free_candev(dev);
+}
+
+module_init(dummy_can_init);
+module_exit(dummy_can_exit);
+
+MODULE_DESCRIPTION("A dummy CAN driver, mainly to test the netlink interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vincent Mailhol <mailhol@kernel.org>");
diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig
new file mode 100644
index 000000000000..54bfc366634c
--- /dev/null
+++ b/drivers/net/can/esd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CAN_ESD_402_PCI
+ tristate "esd electronics gmbh CAN-PCI(e)/402 family"
+ depends on PCI && HAS_DMA
+ help
+ Support for C402 card family from esd electronics gmbh.
+	  This card family is based on the ESDACC CAN controller and is
+	  available in several form factors: PCI, PCIe, PCIe Mini,
+	  M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en).
+
+ This driver can also be built as a module. In this case the
+ module will be called esd_402_pci.
diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile
new file mode 100644
index 000000000000..5dd2d470c286
--- /dev/null
+++ b/drivers/net/can/esd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for esd gmbh ESDACC controller driver
+#
+esd_402_pci-objs := esdacc.o esd_402_pci-core.o
+
+obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
new file mode 100644
index 000000000000..c826f00c551b
--- /dev/null
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "esdacc.h"
+
+#define ESD_PCI_DEVICE_ID_PCIE402 0x0402
+
+#define PCI402_FPGA_VER_MIN 0x003d
+#define PCI402_MAX_CORES 6
+#define PCI402_BAR 0
+#define PCI402_IO_OV_OFFS 0
+#define PCI402_IO_PCIEP_OFFS 0x10000
+#define PCI402_IO_LEN_TOTAL 0x20000
+#define PCI402_IO_LEN_CORE 0x2000
+#define PCI402_PCICFG_MSICAP 0x50
+
+#define PCI402_DMA_MASK DMA_BIT_MASK(32)
+#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
+
+#define PCI402_PCIEP_OF_INT_ENABLE 0x0050
+#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000
+#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004
+#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008
+#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c
+
+struct pci402_card {
+ /* Actually mapped io space, all other iomem derived from this */
+ void __iomem *addr;
+ void __iomem *addr_pciep;
+
+ void *dma_buf;
+ dma_addr_t dma_hnd;
+
+ struct acc_ov ov;
+ struct acc_core *cores;
+
+ bool msi_enabled;
+};
+
+/* The BTR register capabilities described by the can_bittiming_const structures
+ * below are valid since esdACC version 0x0032.
+ */
+
+/* Used if the esdACC FPGA is built as CAN-Classic version. */
+static const struct can_bittiming_const pci402_bittiming_const = {
+ .name = "esd_402",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Used if the esdACC FPGA is built as CAN-FD version. */
+static const struct can_bittiming_const pci402_bittiming_const_canfd = {
+ .name = "esd_402fd",
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct net_device_ops pci402_acc_netdev_ops = {
+ .ndo_open = acc_open,
+ .ndo_stop = acc_close,
+ .ndo_start_xmit = acc_start_xmit,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
+};
+
+static const struct ethtool_ops pci402_acc_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+static irqreturn_t pci402_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *pdev = dev_id;
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ irqreturn_t irq_status;
+
+ irq_status = acc_card_interrupt(&card->ov, card->cores);
+
+ return irq_status;
+}
+
+static int pci402_set_msiconfig(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ u32 addr_lo_offs = 0;
+ u32 addr_lo = 0;
+ u32 addr_hi = 0;
+ u32 data = 0;
+ u16 csr = 0;
+ int err;
+
+ /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability
+ * Register Format
+ */
+ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
+ &addr_lo);
+ if (err)
+ goto failed;
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
+ &addr_hi);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64,
+ &data);
+ if (err)
+ goto failed;
+
+ addr_lo_offs = addr_lo & 0x0000ffff;
+ addr_lo &= 0xffff0000;
+
+ if (addr_hi)
+ addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */
+
+ if (!(csr & PCI_MSI_FLAGS_ENABLE)) {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
+ iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data);
+
+ return 0;
+
+failed:
+ pci_warn(pdev, "Error while setting MSI configuration:\n"
+ "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n",
+ csr, addr_hi, addr_lo, addr_lo_offs, data);
+
+ return err;
+}
+
+static int pci402_init_card(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ card->ov.addr = card->addr + PCI402_IO_OV_OFFS;
+ card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS;
+
+ acc_reset_fpga(&card->ov);
+ acc_init_ov(&card->ov, &pdev->dev);
+
+ if (card->ov.version < PCI402_FPGA_VER_MIN) {
+ pci_err(pdev,
+ "esdACC version (0x%.4x) outdated, please update\n",
+ card->ov.version);
+ return -EINVAL;
+ }
+
+ if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) {
+ pci_err(pdev,
+ "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n",
+ card->ov.timestamp_frequency);
+ return -EINVAL;
+ }
+
+ if (card->ov.active_cores > PCI402_MAX_CORES) {
+ pci_err(pdev,
+ "Card with %u active cores not supported by driver. Aborted.\n",
+ card->ov.active_cores);
+ return -EINVAL;
+ }
+ card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,
+ sizeof(struct acc_core), GFP_KERNEL);
+ if (!card->cores)
+ return -ENOMEM;
+
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ pci_warn(pdev,
+ "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n");
+ }
+
+#ifdef __LITTLE_ENDIAN
+ /* So card converts all busmastered data to LE for us: */
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE);
+#endif
+
+ return 0;
+}
+
+static int pci402_init_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_msi(pdev);
+ if (!err) {
+ err = pci402_set_msiconfig(pdev);
+ if (!err) {
+ card->msi_enabled = true;
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_dbg(pdev, "MSI preparation done\n");
+ }
+ }
+
+ err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), pdev);
+ if (err)
+ goto failure_msidis;
+
+ iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+
+ return 0;
+
+failure_msidis:
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+
+ return err;
+}
+
+static void pci402_finish_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+ devm_free_irq(&pdev->dev, pdev->irq, pdev);
+
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+}
+
+static int pci402_init_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK);
+ if (err) {
+ pci_err(pdev, "DMA set mask failed!\n");
+ return err;
+ }
+
+ /* The esdACC DMA engine needs the DMA buffer aligned to a 64k
+ * boundary. The DMA API guarantees to align the returned buffer to the
+ * smallest PAGE_SIZE order which is greater than or equal to the
+ * requested size. With PCI402_DMA_SIZE == 64kB this suffices here.
+ */
+ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE,
+ &card->dma_hnd, GFP_KERNEL);
+ if (!card->dma_buf)
+ return -ENOMEM;
+
+ acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);
+
+ iowrite32(card->dma_hnd,
+ card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ pci_set_master(pdev);
+
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ return 0;
+}
+
+static void pci402_finish_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ pci_clear_master(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ card->ov.bmfifo.messages = NULL;
+ card->ov.bmfifo.irq_cnt = NULL;
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+
+ core->bmfifo.messages = NULL;
+ core->bmfifo.irq_cnt = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf,
+ card->dma_hnd);
+ card->dma_buf = NULL;
+}
+
+static void pci402_unregister_core(struct acc_core *core)
+{
+ netdev_info(core->netdev, "unregister\n");
+ unregister_candev(core->netdev);
+
+ free_candev(core->netdev);
+ core->netdev = NULL;
+}
+
+static int pci402_init_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+ struct acc_net_priv *priv;
+ struct net_device *netdev;
+ u32 fifo_config;
+
+ core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE;
+
+ fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG);
+ core->tx_fifo_size = (fifo_config >> 24);
+ if (core->tx_fifo_size <= 1) {
+ pci_err(pdev, "Invalid tx_fifo_size!\n");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto failure;
+ }
+ core->netdev = netdev;
+
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+ netdev->netdev_ops = &pci402_acc_netdev_ops;
+ netdev->ethtool_ops = &pci402_acc_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv = netdev_priv(netdev);
+ priv->can.clock.freq = card->ov.core_frequency;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
+ priv->can.bittiming_const = &pci402_bittiming_const_canfd;
+ else
+ priv->can.bittiming_const = &pci402_bittiming_const;
+ priv->can.do_set_bittiming = acc_set_bittiming;
+ priv->can.do_set_mode = acc_set_mode;
+ priv->can.do_get_berr_counter = acc_get_berr_counter;
+
+ priv->core = core;
+ priv->ov = &card->ov;
+
+ err = register_candev(netdev);
+ if (err) {
+ free_candev(core->netdev);
+ core->netdev = NULL;
+ goto failure;
+ }
+
+ netdev_info(netdev, "registered\n");
+ }
+
+ return 0;
+
+failure:
+ for (i--; i >= 0; i--)
+ pci402_unregister_core(&card->cores[i]);
+
+ return err;
+}
+
+static void pci402_finish_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++)
+ pci402_unregister_core(&card->cores[i]);
+}
+
+static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pci402_card *card = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto failure_disable_pci;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ err = pci_request_regions(pdev, pci_name(pdev));
+ if (err)
+ goto failure_disable_pci;
+
+ card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL);
+ if (!card->addr) {
+ err = -ENOMEM;
+ goto failure_release_regions;
+ }
+
+ err = pci402_init_card(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_dma(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_interrupt(pdev);
+ if (err)
+ goto failure_finish_dma;
+
+ err = pci402_init_cores(pdev);
+ if (err)
+ goto failure_finish_interrupt;
+
+ return 0;
+
+failure_finish_interrupt:
+ pci402_finish_interrupt(pdev);
+
+failure_finish_dma:
+ pci402_finish_dma(pdev);
+
+failure_unmap:
+ pci_iounmap(pdev, card->addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci402_remove(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ pci402_finish_interrupt(pdev);
+ pci402_finish_cores(pdev);
+ pci402_finish_dma(pdev);
+ pci_iounmap(pdev, card->addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci402_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_ESDGMBH,
+ .device = ESD_PCI_DEVICE_ID_PCIE402,
+ .subvendor = PCI_VENDOR_ID_ESDGMBH,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci402_tbl);
+
+static struct pci_driver pci402_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci402_tbl,
+ .probe = pci402_probe,
+ .remove = pci402_remove,
+};
+module_pci_driver(pci402_driver);
+
+MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe");
+MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>");
+MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
new file mode 100644
index 000000000000..73e66f9a3781
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include "esdacc.h"
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+
+/* esdACC ID register layout */
+#define ACC_ID_ID_MASK GENMASK(28, 0)
+#define ACC_ID_EFF_FLAG BIT(29)
+
+/* esdACC DLC register layout */
+#define ACC_DLC_DLC_MASK GENMASK(3, 0)
+#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
+
+/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
+#define ACC_DLC_TXD_FLAG BIT(5)
+
+/* ecc value of esdACC equals SJA1000's ECC register */
+#define ACC_ECC_SEG 0x1f
+#define ACC_ECC_DIR 0x20
+#define ACC_ECC_BIT 0x00
+#define ACC_ECC_FORM 0x40
+#define ACC_ECC_STUFF 0x80
+#define ACC_ECC_MASK 0xc0
+
+/* esdACC Status Register bits. Unused bits not documented. */
+#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
+#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
+#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
+
+/* esdACC Overview Module BM_IRQ_Mask register related defines */
+/* Two bit wide command masks to mask or unmask a single core IRQ */
+#define ACC_BM_IRQ_UNMASK BIT(0)
+#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
+/* Command to unmask all IRQ sources. Created by shifting
+ * and oring the two bit wide ACC_BM_IRQ_UNMASK 16 times.
+ */
+#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
+
+static void acc_resetmode_enter(struct acc_core *core)
+{
+ acc_set_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_resetmode_leave(struct acc_core *core)
+{
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
+ const void *data)
+{
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
+ *((const u32 *)(data + 4)));
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
+ *((const u32 *)data));
+ acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
+	/* The CAN ID must be written last; this write starts the transmission. */
+ acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
+}
+
+static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
+{
+ ++tx_fifo_idx;
+ if (tx_fifo_idx >= core->tx_fifo_size)
+ tx_fifo_idx = 0U;
+ return tx_fifo_idx;
+}
+
+/* Convert timestamp from esdACC time stamp ticks to ns
+ *
+ * The conversion factor ts2ns from time stamp counts to ns is basically
+ * ts2ns = NSEC_PER_SEC / timestamp_frequency
+ *
+ * We handle here only a fixed timestamp frequency of 80MHz. The
+ * resulting ts2ns factor would be 12.5.
+ *
+ * At the end we multiply by 12 and add half of the HW timestamp
+ * to get a multiplication by 12.5. This way any overflow is
+ * avoided until ktime_t itself overflows.
+ */
+#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
+#define ACC_TS_80MHZ_SHIFT 1
+
+static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
+{
+ u64 ns;
+
+ ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
+
+ return ns_to_ktime(ns);
+}
+
+#undef ACC_TS_FACTOR
+#undef ACC_TS_80MHZ_SHIFT
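
A quick worked check of the conversion above (illustrative only): at 80 MHz,
ACC_TS_FACTOR = 10^9 / (80 * 10^6) = 12, so for ts = 80000000 ticks (one
second) ns = 80000000 * 12 + 80000000 / 2 = 960000000 + 40000000 =
1000000000 ns, i.e. exactly the 12.5 ns per tick derived in the comment.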
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev)
+{
+ u32 temp;
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
+ ov->version = temp;
+ ov->features = (temp >> 16);
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
+ ov->total_cores = temp;
+ ov->active_cores = (temp >> 8);
+
+ ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
+ ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
+
+ /* Depending on esdACC feature NEW_PSC enable the new prescaler
+ * or adjust core_frequency according to the implicit division by 2.
+ */
+ if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
+ } else {
+ ov->core_frequency /= 2;
+ }
+
+ dev_dbg(dev,
+ "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
+ ov->version, ov->core_frequency, ov->timestamp_frequency,
+ ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
+ ov->active_cores, ov->total_cores);
+}
+
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
+{
+ unsigned int u;
+
+ /* DMA buffer layout as follows where N is the number of CAN cores
+ * implemented in the FPGA, i.e. N = ov->total_cores
+ *
+ * Section Layout Section size
+ * ----------------------------------------------
+ * FIFO Card/Overview ACC_CORE_DMABUF_SIZE
+ * FIFO Core0 ACC_CORE_DMABUF_SIZE
+ * ... ...
+ * FIFO CoreN ACC_CORE_DMABUF_SIZE
+ * irq_cnt Card/Overview sizeof(u32)
+ * irq_cnt Core0 sizeof(u32)
+ * ... ...
+ * irq_cnt CoreN sizeof(u32)
+ */
+ ov->bmfifo.messages = mem;
+ ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
+
+ for (u = 0U; u < ov->active_cores; u++) {
+ struct acc_core *core = &cores[u];
+
+ core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
+ core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
+ }
+}
+
+int acc_open(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ u32 tx_fifo_status;
+ u32 ctrl;
+ int err;
+
+ /* Retry to enter RESET mode if out of sync. */
+ if (priv->can.state != CAN_STATE_STOPPED) {
+ netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
+ __func__, can_get_state_str(priv->can.state));
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl |= ACC_REG_CTRL_MASK_LOM;
+
+ acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
+
+ acc_resetmode_leave(core);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Resync TX FIFO indices to HW state after (re-)start. */
+ tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
+ core->tx_fifo_head = tx_fifo_status & 0xff;
+ core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+int acc_close(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS |
+ ACC_REG_CTRL_MASK_IE_BUSERR);
+
+ netif_stop_queue(netdev);
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Mark pending TX requests to be aborted after controller restart. */
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+
+ /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_LOM);
+
+ close_candev(netdev);
+ return 0;
+}
+
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 tx_fifo_head = core->tx_fifo_head;
+ int fifo_usage;
+ u32 acc_id;
+ u32 acc_dlc;
+
+ if (can_dev_dropped_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* Access core->tx_fifo_tail only once because it may be changed
+ * from the interrupt level.
+ */
+ fifo_usage = tx_fifo_head - core->tx_fifo_tail;
+ if (fifo_usage < 0)
+ fifo_usage += core->tx_fifo_size;
+
+ if (fifo_usage >= core->tx_fifo_size - 1) {
+ netdev_err(core->netdev,
+ "BUG: TX ring full when queue awake!\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fifo_usage == core->tx_fifo_size - 2)
+ netif_stop_queue(netdev);
+
+ acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ if (cf->can_id & CAN_RTR_FLAG)
+ acc_dlc |= ACC_DLC_RTR_FLAG;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ acc_dlc |= ACC_DLC_SSTX_FLAG;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ acc_id = cf->can_id & CAN_EFF_MASK;
+ acc_id |= ACC_ID_EFF_FLAG;
+ } else {
+ acc_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
+
+ core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
+
+ acc_txq_put(core, acc_id, acc_dlc, cf->data);
+
+ return NETDEV_TX_OK;
+}
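
A worked example of the occupancy math above (illustrative only): with
tx_fifo_size = 8, tx_fifo_head = 2 and tx_fifo_tail = 6 (head has wrapped),
fifo_usage = 2 - 6 = -4, corrected to -4 + 8 = 4 frames in flight. The queue
is stopped at size - 2 so one slot always separates head and tail, which
keeps a full FIFO distinguishable from an empty one.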
+
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
+
+ bec->txerr = (core_status >> 8) & 0xff;
+ bec->rxerr = core_status & 0xff;
+
+ return 0;
+}
+
+int acc_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* Paranoid FIFO index check. */
+ {
+ const u32 tx_fifo_status =
+ acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
+ const u8 hw_fifo_head = tx_fifo_status;
+
+ if (hw_fifo_head != priv->core->tx_fifo_head ||
+ hw_fifo_head != priv->core->tx_fifo_tail) {
+ netdev_warn(netdev,
+ "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
+ priv->core->tx_fifo_tail,
+ priv->core->tx_fifo_head,
+ tx_fifo_status);
+ }
+ }
+ acc_resetmode_leave(priv->core);
+ /* To leave the bus-off state the esdACC controller begins
+ * here a grace period where it counts 128 "idle conditions" (each
+ * of 11 consecutive recessive bits) on the bus as required
+ * by the CAN spec.
+ *
+ * During this time the TX FIFO may still contain already
+ * aborted "zombie" frames that are only drained from the FIFO
+ * at the end of the grace period.
+ *
+	 * To avoid interfering with this drain process we don't
+ * call netif_wake_queue() here. When the controller reaches
+ * the error-active state again, it informs us about that
+ * with an acc_bmmsg_errstatechange message. Then
+ * netif_wake_queue() is called from
+ * handle_core_msg_errstatechange() instead.
+ */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int acc_set_bittiming(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 brp;
+ u32 btr;
+
+ if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ u32 fbtr = 0;
+
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
+ brp, btr, fbtr);
+ } else {
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
+ }
+
+ return 0;
+}
+
+static void handle_core_msg_rxtxdone(struct acc_core *core,
+ const struct acc_bmmsg_rxtxdone *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct sk_buff *skb;
+
+ if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+
+ if (core->tx_fifo_head == tx_fifo_tail) {
+ netdev_warn(core->netdev,
+ "TX interrupt, but queue is empty!?\n");
+ return;
+ }
+
+ /* Direct access echo skb to attach HW time stamp. */
+ skb = priv->can.echo_skb[tx_fifo_tail];
+ if (skb) {
+ skb_hwtstamps(skb)->hwtstamp =
+ acc_ts2ktime(priv->ov, msg->ts);
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
+ NULL);
+
+ core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+
+ netif_wake_queue(core->netdev);
+
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(core->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = msg->id & ACC_ID_ID_MASK;
+ if (msg->id & ACC_ID_EFF_FLAG)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
+ priv->can.ctrlmode);
+
+ if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+}
+
+static void handle_core_msg_txabort(struct acc_core *core,
+ const struct acc_bmmsg_txabort *msg)
+{
+ struct net_device_stats *stats = &core->netdev->stats;
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+ u32 abort_mask = msg->abort_mask; /* u32 extend to avoid warnings later */
+
+ /* The abort_mask shows which frames were aborted in esdACC's FIFO. */
+ while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
+ const u32 tail_mask = (1U << tx_fifo_tail);
+
+ if (!(abort_mask & tail_mask))
+ break;
+ abort_mask &= ~tail_mask;
+
+ can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+
+ tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+ }
+ core->tx_fifo_tail = tx_fifo_tail;
+ if (abort_mask)
+ netdev_warn(core->netdev, "Unhandled aborted messages\n");
+
+ if (!acc_resetmode_entered(core))
+ netif_wake_queue(core->netdev);
+}
+
+static void handle_core_msg_overrun(struct acc_core *core,
+ const struct acc_bmmsg_overrun *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* lost_cnt may be 0 if not supported by esdACC version */
+ if (msg->lost_cnt) {
+ stats->rx_errors += msg->lost_cnt;
+ stats->rx_over_errors += msg->lost_cnt;
+ } else {
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void handle_core_msg_buserr(struct acc_core *core,
+ const struct acc_bmmsg_buserr *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ u8 can_err_prot_type = 0U;
+
+ priv->can.can_stats.bus_error++;
+
+ /* Error occurred during transmission? */
+ if (msg->ecc & ACC_ECC_DIR) {
+ stats->rx_errors++;
+ } else {
+ can_err_prot_type |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+ /* Determine error type */
+ switch (msg->ecc & ACC_ECC_MASK) {
+ case ACC_ECC_BIT:
+ can_err_prot_type |= CAN_ERR_PROT_BIT;
+ break;
+ case ACC_ECC_FORM:
+ can_err_prot_type |= CAN_ERR_PROT_FORM;
+ break;
+ case ACC_ECC_STUFF:
+ can_err_prot_type |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
+ break;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ /* Set protocol error type */
+ cf->data[2] = can_err_prot_type;
+ /* Set error location */
+ cf->data[3] = msg->ecc & ACC_ECC_SEG;
+
+ /* Insert CAN TX and RX error counters. */
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void
+handle_core_msg_errstatechange(struct acc_core *core,
+ const struct acc_bmmsg_errstatechange *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ enum can_state new_state;
+
+ if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ /* See comment in acc_set_mode() for CAN_MODE_START */
+ netif_wake_queue(core->netdev);
+ }
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+
+ if (new_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (txerr >= rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (rxerr >= txerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ /* Always call can_change_state() to update the state
+ * even if alloc_can_err_skb() may have failed.
+ * can_change_state() can cope with a NULL cf pointer.
+ */
+ can_change_state(core->netdev, cf, tx_state, rx_state);
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+ can_bus_off(core->netdev);
+ }
+}
+
+static void handle_core_interrupt(struct acc_core *core)
+{
+ u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
+
+ while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
+ const union acc_bmmsg *msg =
+ &core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
+
+ switch (msg->msg_id) {
+ case BM_MSG_ID_RXTXDONE:
+ handle_core_msg_rxtxdone(core, &msg->rxtxdone);
+ break;
+
+ case BM_MSG_ID_TXABORT:
+ handle_core_msg_txabort(core, &msg->txabort);
+ break;
+
+ case BM_MSG_ID_OVERRUN:
+ handle_core_msg_overrun(core, &msg->overrun);
+ break;
+
+ case BM_MSG_ID_BUSERR:
+ handle_core_msg_buserr(core, &msg->buserr);
+ break;
+
+ case BM_MSG_ID_ERRPASSIVE:
+ case BM_MSG_ID_ERRWARN:
+ handle_core_msg_errstatechange(core,
+ &msg->errstatechange);
+ break;
+
+ default:
+ /* Ignore all other BM messages (like the CAN-FD messages) */
+ break;
+ }
+
+ core->bmfifo.msg_fifo_tail =
+ (core->bmfifo.msg_fifo_tail + 1) & 0xff;
+ }
+}
+
+/**
+ * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
+ *
+ * @ov: overview module structure
+ * @cores: array of core structures
+ *
+ * This function handles all interrupts pending for the overview module and the
+ * CAN cores of the esdACC FPGA.
+ *
+ * For the overview module and each CAN core it compares bmfifo.irq_cnt
+ * with the previously saved bmfifo.local_irq_cnt. An IRQ is pending if
+ * they differ. The esdACC FPGA
+ * updates the bmfifo.irq_cnt values by DMA.
+ *
+ * The pending interrupts are masked by writing to the IRQ mask register at
+ * ACC_OV_OF_BM_IRQ_MASK. This register has for each core a two bit command
+ * field evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ handle_core_interrupt() handles all
+ * busmaster messages from the message FIFO. The last handled message (FIFO
+ * index) is written to the CAN core to acknowledge its handling.
+ *
+ * Last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+	/* First we check for which units interrupts are pending, the
+	 * card/overview or any of the cores. Two bits in irqmask are used
+	 * for each;
+ * Each two bit field is set to ACC_BM_IRQ_MASK if an IRQ is
+ * pending.
+ */
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+	/* Second, we tell the card which units we are handling by writing
+	 * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
+ * interrupts by writing irq_cnt:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet. */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
+ ov->bmfifo.local_irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
+ handle_core_interrupt(core);
+ acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
+ core->bmfifo.local_irq_cnt);
+ }
+ }
+
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
+
+ return IRQ_HANDLED;
+}
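
A worked example of the two-bit command fields described above (illustrative
only): the overview module occupies bits 1:0 and CAN core i occupies bits
2 * (i + 1) + 1 : 2 * (i + 1). Masking the overview module and core 0 only
would write irqmask = ACC_BM_IRQ_MASK | (ACC_BM_IRQ_MASK << 2) = 0x2 | 0x8 =
0xa; the final ACC_BM_IRQ_UNMASK_ALL write (0x55555555) puts the 01 "unmask"
command into all sixteen fields at once.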
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
new file mode 100644
index 000000000000..6b7ebd8c91b2
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/bits.h>
+#include <linux/can/dev.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+
+#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
+#define ACC_I2C_ADDON_DETECT_DELAY_MS 10
+
+/* esdACC Overview Module */
+#define ACC_OV_OF_PROBE 0x0000
+#define ACC_OV_OF_VERSION 0x0004
+#define ACC_OV_OF_INFO 0x0008
+#define ACC_OV_OF_CANCORE_FREQ 0x000c
+#define ACC_OV_OF_TS_FREQ_LO 0x0010
+#define ACC_OV_OF_TS_FREQ_HI 0x0014
+#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
+#define ACC_OV_OF_TS_CURR_LO 0x001c
+#define ACC_OV_OF_TS_CURR_HI 0x0020
+#define ACC_OV_OF_IRQ_STATUS 0x0028
+#define ACC_OV_OF_MODE 0x002c
+#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_OV_OF_BM_IRQ_MASK 0x0074
+#define ACC_OV_OF_MSI_DATA 0x0080
+#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084
+
+/* Feature flags are contained in the upper 16 bits of the version
+ * register at ACC_OV_OF_VERSION but are only used with these masks
+ * after extraction into a separate variable => (xx - 16).
+ */
+#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
+#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)
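+
+/* Extraction sketch (illustrative):
+ *
+ *	u16 features = acc_ov_read32(ov, ACC_OV_OF_VERSION) >> 16;
+ *
+ *	if (features & ACC_OV_REG_FEAT_MASK_CANFD)
+ *		...
+ */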
+
+#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
+#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
+#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
+#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
+#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
+#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
+#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
+#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
+#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
+#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
+#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
+#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
+
+/* esdACC CAN Core Module */
+#define ACC_CORE_OF_CTRL 0x0000
+#define ACC_CORE_OF_STATUS_IRQ 0x0008
+#define ACC_CORE_OF_BRP 0x000c
+#define ACC_CORE_OF_BTR 0x0010
+#define ACC_CORE_OF_FBTR 0x0014
+#define ACC_CORE_OF_STATUS 0x0030
+#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
+#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
+#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
+#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
+#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_CORE_OF_TXFIFO_ID 0x00c0
+#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
+#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
+#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
+
+/* CTRL register layout */
+#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
+#define ACC_REG_CTRL_MASK_LOM BIT(1)
+#define ACC_REG_CTRL_MASK_STM BIT(2)
+#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
+#define ACC_REG_CTRL_MASK_TS BIT(6)
+#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
+
+#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)
+
+/* BRP and BTR register layout for CAN-Classic version */
+#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
+#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)
+
+/* BRP and BTR register layout for CAN-FD version */
+#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
+#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)
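+
+/* Illustrative only: the timing fields can be packed with FIELD_PREP()
+ * from <linux/bitfield.h> (not included by this header); the exact value
+ * encoding (e.g. minus-one offsets) is hardware-specific:
+ *
+ *	u32 btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, tseg1) |
+ *		  FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, tseg2) |
+ *		  FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, sjw);
+ */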
+
+/* 256 BM_MSGs of 32 byte size */
+#define ACC_CORE_DMAMSG_SIZE 32U
+#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)
+
+enum acc_bmmsg_id {
+ BM_MSG_ID_RXTXDONE = 0x01,
+ BM_MSG_ID_TXABORT = 0x02,
+ BM_MSG_ID_OVERRUN = 0x03,
+ BM_MSG_ID_BUSERR = 0x04,
+ BM_MSG_ID_ERRPASSIVE = 0x05,
+ BM_MSG_ID_ERRWARN = 0x06,
+ BM_MSG_ID_TIMESLICE = 0x07,
+ BM_MSG_ID_HWTIMER = 0x08,
+ BM_MSG_ID_HOTPLUG = 0x09,
+};
+
+/* The struct acc_bmmsg_* structure declarations that follow here provide
+ * access to the ring buffer of bus master messages maintained by the FPGA
+ * bus master engine. All bus master messages have the same size of
+ * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
+ * memory.
+ *
+ * All structure members are naturally aligned. Therefore we should not
+ * need a __packed attribute. All struct acc_bmmsg_* declarations have at
+ * least reserved* members to fill the structure to the full
+ * ACC_CORE_DMAMSG_SIZE.
+ *
+ * A failure of this property due to padding will be detected at compile
+ * time by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
+ */
+
+struct acc_bmmsg_rxtxdone {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u32 id;
+ struct {
+ u8 len;
+ u8 txdfifo_idx;
+ u8 zeroes8;
+ u8 reserved;
+ } acc_dlc;
+ u8 data[CAN_MAX_DLEN];
+ /* Time stamps in struct acc_ov::timestamp_frequency ticks. */
+ u64 ts;
+};
+
+struct acc_bmmsg_txabort {
+ u8 msg_id;
+ u8 txfifo_level;
+ u16 abort_mask;
+ u8 txtsfifo_level;
+ u8 reserved2[1];
+ u16 abort_mask_txts;
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_overrun {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 lost_cnt;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_buserr {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 ecc;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reg_btr;
+ u32 reserved3[2];
+};
+
+struct acc_bmmsg_errstatechange {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reserved3[3];
+};
+
+struct acc_bmmsg_timeslice {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hwtimer {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[1];
+ u64 timer;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hotplug {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[7];
+};
+
+union acc_bmmsg {
+ u8 msg_id;
+ struct acc_bmmsg_rxtxdone rxtxdone;
+ struct acc_bmmsg_txabort txabort;
+ struct acc_bmmsg_overrun overrun;
+ struct acc_bmmsg_buserr buserr;
+ struct acc_bmmsg_errstatechange errstatechange;
+ struct acc_bmmsg_timeslice timeslice;
+ struct acc_bmmsg_hwtimer hwtimer;
+};
+
+/* Ensure union acc_bmmsg has the expected size. */
+static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);
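+
+/* The per-type layouts could be pinned the same way, e.g. (sketch):
+ *
+ *	static_assert(sizeof(struct acc_bmmsg_rxtxdone) == ACC_CORE_DMAMSG_SIZE);
+ */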
+
+struct acc_bmfifo {
+ const union acc_bmmsg *messages;
+	/* irq_cnt points to a u32 value in coherent DMA memory where the
+	 * esdACC FPGA deposits the bm_fifo head index. Only bits 7..0
+	 * are valid. Use READ_ONCE() to access this memory location.
+	 */
+ const u32 *irq_cnt;
+ u32 local_irq_cnt;
+ u32 msg_fifo_tail;
+};
+
+struct acc_core {
+ void __iomem *addr;
+ struct net_device *netdev;
+ struct acc_bmfifo bmfifo;
+ u8 tx_fifo_size;
+ u8 tx_fifo_head;
+ u8 tx_fifo_tail;
+};
+
+struct acc_ov {
+ void __iomem *addr;
+ struct acc_bmfifo bmfifo;
+ u32 timestamp_frequency;
+ u32 core_frequency;
+ u16 version;
+ u16 features;
+ u8 total_cores;
+ u8 active_cores;
+};
+
+struct acc_net_priv {
+ struct can_priv can; /* must be the first member! */
+ struct acc_core *core;
+ struct acc_ov *ov;
+};
+
+static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
+{
+ return ioread32be(core->addr + offs);
+}
+
+static inline void acc_write32(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, core->addr + offs);
+}
+
+static inline void acc_write32_noswap(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32(v, core->addr + offs);
+}
+
+static inline void acc_set_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v |= mask;
+ acc_write32(core, offs, v);
+}
+
+static inline void acc_clear_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v &= ~mask;
+ acc_write32(core, offs, v);
+}
+
+static inline int acc_resetmode_entered(struct acc_core *core)
+{
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);
+
+ return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
+}
+
+static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
+{
+ return ioread32be(ov->addr + offs);
+}
+
+static inline void acc_ov_write32(struct acc_ov *ov,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, ov->addr + offs);
+}
+
+static inline void acc_ov_set_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v |= b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_ov_clear_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v &= ~b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_reset_fpga(struct acc_ov *ov)
+{
+ acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);
+
+ /* (Re-)start and wait for completion of addon detection on the I^2C bus */
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
+ mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
+}
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev);
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
+ const void *mem);
+int acc_open(struct net_device *netdev);
+int acc_close(struct net_device *netdev);
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+int acc_set_mode(struct net_device *netdev, enum can_mode mode);
+int acc_set_bittiming(struct net_device *netdev);
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile
new file mode 100644
index 000000000000..89d5695c902e
--- /dev/null
+++ b/drivers/net/can/flexcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+
+flexcan-objs :=
+flexcan-objs += flexcan-core.o
+flexcan-objs += flexcan-ethtool.o
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan/flexcan-core.c
index 7734229aa078..f5d22c61503f 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -14,8 +14,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
-#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/imx/sci.h>
@@ -25,14 +23,17 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include "flexcan.h"
+
#define DRV_NAME "flexcan"
/* 8 for RX fifo and 2 error handling */
@@ -173,9 +174,9 @@
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
-#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8
+#define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0
+#define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1)
#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
@@ -206,53 +207,6 @@
#define FLEXCAN_TIMEOUT_US (250)
-/* FLEXCAN hardware feature flags
- *
- * Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
- * Filter? connected? Passive detection ption in MB Supported?
- * MCF5441X FlexCAN2 ? no yes no no yes no 16
- * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
- * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
- * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
- * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
- * VF610 FlexCAN3 ? no yes no yes yes? no 64
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
- * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
- *
- * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
- */
-
-/* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
- /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
-/* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
-/* Disable non-correctable errors interrupt and freeze mode */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
-/* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
-/* No interrupt for error passive */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
-/* default to BE register access */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
-/* Setup stop mode with GPR to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
-/* Support CAN-FD mode */
-#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
-/* support memory detection and correction */
-#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
-/* Setup stop mode with SCU firmware to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
-/* Setup 3 separate interrupts, main, boff and err */
-#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
-/* Setup 16 mailboxes */
-#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
-
/* Structure of the message buffer */
struct flexcan_mb {
u32 can_ctrl;
@@ -290,31 +244,33 @@ struct flexcan_regs {
u32 dbg1; /* 0x58 */
u32 dbg2; /* 0x5c */
u32 _reserved3[8]; /* 0x60 */
- u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
- /* FIFO-mode:
- * MB
- * 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserved
- * 0x0e0...0x0ff 6-7 8 entry ID table
- * (mx25, mx28, mx35, mx53)
- * 0x0e0...0x2df 6-7..37 8..128 entry ID table
- * size conf'ed via ctrl2::RFFN
- * (mx6, vf610)
- */
- u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
- u32 _reserved5[24]; /* 0x980 */
- u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[39]; /* 0x9e4 */
- u32 _rxfir[6]; /* 0xa80 */
- u32 _reserved8[2]; /* 0xa98 */
- u32 _rxmgmask; /* 0xaa0 */
- u32 _rxfgmask; /* 0xaa4 */
- u32 _rx14mask; /* 0xaa8 */
- u32 _rx15mask; /* 0xaac */
- u32 tx_smb[4]; /* 0xab0 */
- u32 rx_smb0[4]; /* 0xac0 */
- u32 rx_smb1[4]; /* 0xad0 */
+ struct_group(init,
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size conf'ed via ctrl2::RFFN
+ * (mx6, vf610)
+ */
+ u32 _reserved4[256]; /* 0x480 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
+ u32 _reserved5[24]; /* 0x980 */
+ u32 gfwr_mx6; /* 0x9e0 - MX6 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
+ );
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -328,113 +284,117 @@ struct flexcan_regs {
u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
u32 fdcrc; /* 0xc08 */
u32 _reserved9[199]; /* 0xc0c */
- u32 tx_smb_fd[18]; /* 0xf28 */
- u32 rx_smb0_fd[18]; /* 0xf70 */
- u32 rx_smb1_fd[18]; /* 0xfb8 */
+ struct_group(init_fd,
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
+ );
};
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
-struct flexcan_devtype_data {
- u32 quirks; /* quirks needed for different IP cores */
-};
-
-struct flexcan_stop_mode {
- struct regmap *gpr;
- u8 req_gpr;
- u8 req_bit;
-};
-
-struct flexcan_priv {
- struct can_priv can;
- struct can_rx_offload offload;
- struct device *dev;
-
- struct flexcan_regs __iomem *regs;
- struct flexcan_mb __iomem *tx_mb;
- struct flexcan_mb __iomem *tx_mb_reserved;
- u8 tx_mb_idx;
- u8 mb_count;
- u8 mb_size;
- u8 clk_src; /* clock source of CAN Protocol Engine */
- u8 scu_idx;
-
- u64 rx_mask;
- u64 tx_mask;
- u32 reg_ctrl_default;
-
- struct clk *clk_ipg;
- struct clk *clk_per;
- const struct flexcan_devtype_data *devtype_data;
- struct regulator *reg_xceiver;
- struct flexcan_stop_mode stm;
-
- int irq_boff;
- int irq_err;
-
- /* IPC handle when setup stop mode by System Controller firmware(scfw) */
- struct imx_sc_ipc *sc_ipc_handle;
-
- /* Read and Write APIs */
- u32 (*read)(void __iomem *addr);
- void (*write)(u32 val, void __iomem *addr);
-};
-
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16,
+ FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
+ FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
- .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+};
+
+static struct flexcan_devtype_data fsl_imx93_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+};
+
+static const struct flexcan_devtype_data fsl_imx95_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
- FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+};
+
+static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -596,13 +556,20 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, true);
if (ret < 0)
return ret;
- } else {
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) {
+		/* In SCMI mode the driver does nothing here: after Linux
+		 * suspends, ATF sends a request to the SM (system manager,
+		 * M33 core) through the SCMI protocol. Once the SM gets
+		 * this request, it asserts the IPG_STOP signal to FlexCAN,
+		 * putting the CAN module into STOP mode.
+		 */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -614,12 +581,16 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
u32 reg_mcr;
int ret;
- /* remove stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+	/* Remove the stop request. For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI
+	 * nothing needs to be done here: ATF already sent the request to
+	 * the SM before Linux resumed. Once the SM gets this request, it
+	 * deasserts the IPG_STOP signal to FlexCAN.
+	 */
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
return ret;
- } else {
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
}
@@ -674,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv)
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_enable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_on(priv->transceiver);
- return regulator_enable(priv->reg_xceiver);
+ return 0;
}
static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_disable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_off(priv->transceiver);
- return regulator_disable(priv->reg_xceiver);
+ return 0;
}
static int flexcan_chip_enable(struct flexcan_priv *priv)
@@ -790,11 +765,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -812,7 +785,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16);
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -912,7 +885,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -959,7 +932,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -1011,14 +984,9 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u32 code;
do {
@@ -1044,6 +1012,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
@@ -1083,7 +1056,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
}
mark_as_read:
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
@@ -1109,7 +1082,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
enum can_state last_state = priv->can.state;
/* reception interrupt */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u64 reg_iflag_rx;
int ret;
@@ -1147,10 +1120,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
handled = IRQ_HANDLED;
stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload, 0,
- reg_ctrl << 16, NULL);
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, 0,
+ reg_ctrl << 16, NULL);
stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
@@ -1169,7 +1141,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* state change interrupt or broken error state quirk fix is enabled */
if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
- (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
@@ -1191,11 +1163,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
* (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
*/
if ((last_state != priv->can.state) &&
- (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+ (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
- if (priv->devtype_data->quirks &
+ if (priv->devtype_data.quirks &
FLEXCAN_QUIRK_BROKEN_WERR_STATE)
flexcan_error_irq_enable(priv);
else
@@ -1254,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_cbt, reg_fdctrl;
@@ -1400,14 +1372,10 @@ static void flexcan_ram_init(struct net_device *dev)
reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
- memset_io(&regs->mb[0][0], 0,
- offsetof(struct flexcan_regs, rx_smb1[3]) -
- offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+ memset_io(&regs->init, 0, sizeof(regs->init));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- memset_io(&regs->tx_smb_fd[0], 0,
- offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
- offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+ memset_io(&regs->init_fd, 0, sizeof(regs->init_fd));
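+	/* The struct_group() regions in struct flexcan_regs are what make
+	 * these sizeof()-based clears possible; the overall layout is
+	 * pinned by the static_assert() following the register struct.
+	 */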
reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
@@ -1423,26 +1391,26 @@ static int flexcan_rx_offload_setup(struct net_device *dev)
else
priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16)
priv->mb_count = 16;
else
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX);
else
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
@@ -1506,7 +1474,7 @@ static int flexcan_chip_start(struct net_device *dev)
if (err)
goto out_chip_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
flexcan_ram_init(dev);
flexcan_set_bittiming(dev);
@@ -1532,10 +1500,10 @@ static int flexcan_chip_start(struct net_device *dev)
/* MCR
*
* FIFO:
- * - disable for timestamp mode
+ * - disable for mailbox mode
* - enable for FIFO mode
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
reg_mcr &= ~FLEXCAN_MCR_FEN;
else
reg_mcr |= FLEXCAN_MCR_FEN;
@@ -1586,7 +1554,7 @@ static int flexcan_chip_start(struct net_device *dev)
* on most Flexcan cores, too. Otherwise we don't get
* any error warning or passive interrupts.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
else
@@ -1599,7 +1567,7 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
priv->write(reg_ctrl, &regs->ctrl);
- if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+ if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
priv->write(reg_ctrl2, &regs->ctrl2);
@@ -1631,7 +1599,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_fdctrl, &regs->fdctrl);
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
@@ -1639,7 +1607,7 @@ static int flexcan_chip_start(struct net_device *dev)
}
} else {
/* clear and invalidate unused mailboxes first */
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
+ for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
&mb->can_ctrl);
@@ -1659,7 +1627,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(0x0, &regs->rx14mask);
priv->write(0x0, &regs->rx15mask);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
@@ -1673,7 +1641,7 @@ static int flexcan_chip_start(struct net_device *dev)
* This also works around errata e5295 which generates false
* positive memory errors and put the device in freeze mode.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
* MECR register (step 1 - 5)
@@ -1771,11 +1739,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
@@ -1799,7 +1765,7 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_can_rx_offload_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
err = request_irq(priv->irq_boff,
flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -1811,16 +1777,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
- flexcan_chip_interrupts_enable(dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ err = request_irq(priv->irq_secondary_mb,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_err;
+ }
- can_led_event(dev, CAN_LED_EVENT_OPEN);
+ flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
+ out_free_irq_err:
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_err, dev);
out_free_irq_boff:
- free_irq(priv->irq_boff, dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1845,7 +1820,10 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
+ free_irq(priv->irq_secondary_mb, dev);
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
}
@@ -1860,8 +1838,6 @@ static int flexcan_close(struct net_device *dev)
pm_runtime_put(priv->dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1891,7 +1867,6 @@ static const struct net_device_ops flexcan_netdev_ops = {
.ndo_open = flexcan_open,
.ndo_stop = flexcan_close,
.ndo_start_xmit = flexcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
static int register_flexcandev(struct net_device *dev)
@@ -2051,15 +2026,25 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv = netdev_priv(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
ret = flexcan_setup_stop_mode_scfw(pdev);
- else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)
+		/* ATF will handle all IPG_STOP related work */
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
- if (ret)
+	/* A return value of -EINVAL means the SoC claims to support stop
+	 * mode but the device tree lacks the stop mode property. In that
+	 * case return 0 directly; this skips the wakeup-capable setting
+	 * and does not block the driver probe.
+	 */
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
return ret;
device_set_wakeup_capable(&pdev->dev, true);
@@ -2073,6 +2058,8 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
+ { .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
+ { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2082,6 +2069,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
+ { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2098,11 +2086,11 @@ MODULE_DEVICE_TABLE(platform, flexcan_id_table);
static int flexcan_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
struct flexcan_platform_data *pdata;
@@ -2118,6 +2106,11 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
return PTR_ERR(reg_xceiver);
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver))
+ return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
+ "failed to get phy\n");
+
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -2147,25 +2140,35 @@ static int flexcan_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
- of_id = of_match_device(flexcan_of_match, &pdev->dev);
- if (of_id)
- devtype_data = of_id->data;
- else if (platform_get_device_id(pdev)->driver_data)
- devtype_data = (struct flexcan_devtype_data *)
- platform_get_device_id(pdev)->driver_data;
- else
- return -ENODEV;
+ devtype_data = device_get_match_data(&pdev->dev);
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
- !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
- dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ !((devtype_data->quirks &
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
+ return -EINVAL;
+ }
+
+ if ((devtype_data->quirks &
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
+ dev_err(&pdev->dev,
+			"Quirks (0x%08x) inconsistent: RX_MAILBOX_RTR supported but not RX_MAILBOX\n",
+ devtype_data->quirks);
return -EINVAL;
}
@@ -2177,13 +2180,15 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
+ dev->ethtool_ops = &flexcan_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
+ priv->devtype_data = *devtype_data;
if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
- devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
priv->read = flexcan_read_be;
priv->write = flexcan_write_be;
} else {
@@ -2202,27 +2207,38 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
priv->clk_src = clk_src;
- priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ priv->transceiver = transceiver;
+
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
- if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
- if (priv->irq_boff <= 0) {
- err = -ENODEV;
+ if (priv->irq_boff < 0) {
+ err = priv->irq_boff;
goto failed_platform_get_irq;
}
priv->irq_err = platform_get_irq(pdev, 2);
- if (priv->irq_err <= 0) {
- err = -ENODEV;
+ if (priv->irq_err < 0) {
+ err = priv->irq_err;
goto failed_platform_get_irq;
}
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
+ if (priv->irq_secondary_mb < 0) {
+ err = priv->irq_secondary_mb;
+ goto failed_platform_get_irq;
+ }
+ }
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&flexcan_fd_data_bittiming_const;
} else {
priv->can.bittiming_const = &flexcan_bittiming_const;
@@ -2240,13 +2256,11 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
of_can_transceiver(dev);
- devm_can_led_init(dev);
return 0;
@@ -2260,7 +2274,7 @@ static int flexcan_probe(struct platform_device *pdev)
return err;
}
-static int flexcan_remove(struct platform_device *pdev)
+static void flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -2269,8 +2283,6 @@ static int flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
pm_runtime_disable(&pdev->dev);
free_candev(dev);
-
- return 0;
}
static int __maybe_unused flexcan_suspend(struct device *device)
@@ -2295,14 +2307,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2313,7 +2330,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2327,12 +2343,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
@@ -2367,9 +2391,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- err = pm_runtime_force_suspend(device);
- if (err)
- return err;
+	/* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, ATF must send a request
+	 * to the SM through the SCMI protocol, and the SM then asserts
+	 * the IPG_STOP signal. All of this requires the CAN clocks to
+	 * stay on. Once the CAN module has seen IPG_STOP and switched to
+	 * STOP mode, whether the CAN clocks stay on or are gated off
+	 * depends on the hardware design.
+	 */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -2383,9 +2417,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
new file mode 100644
index 000000000000..50e86b2da532
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "flexcan.h"
+
+static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0)
+ "rx-rtr",
+};
+
+static void
+flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = priv->mb_count;
+ ring->tx_max_pending = priv->mb_count;
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
+ ring->rx_pending = priv->offload.mb_last -
+ priv->offload.mb_first + 1;
+ else
+ ring->rx_pending = 6; /* RX-FIFO depth is fixed */
+
+	/* the driver currently supports only one TX buffer */
+ ring->tx_pending = 1;
+}
+
+static void
+flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, flexcan_priv_flags_strings,
+ sizeof(flexcan_priv_flags_strings));
+ }
+}
+
+static u32 flexcan_get_priv_flags(struct net_device *ndev)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 priv_flags = 0;
+
+ if (flexcan_active_rx_rtr(priv))
+ priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR;
+
+ return priv_flags;
+}
+
+static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags)
+{
+ struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 quirks = priv->devtype_data.quirks;
+
+ if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) {
+ if (flexcan_supports_rx_mailbox_rtr(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else if (flexcan_supports_rx_fifo(priv))
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ } else {
+ if (flexcan_supports_rx_mailbox(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ }
+
+ if (quirks != priv->devtype_data.quirks && netif_running(ndev))
+ return -EBUSY;
+
+ priv->devtype_data.quirks = quirks;
+
+ if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) &&
+ !flexcan_active_rx_rtr(priv))
+ netdev_info(ndev,
+ "Activating RX mailbox mode, cannot receive RTR frames.\n");
+
+ return 0;
+}
+
+static int flexcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(flexcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops flexcan_ethtool_ops = {
+ .get_ringparam = flexcan_get_ringparam,
+ .get_strings = flexcan_get_strings,
+ .get_priv_flags = flexcan_get_priv_flags,
+ .set_priv_flags = flexcan_set_priv_flags,
+ .get_sset_count = flexcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
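+
+/* The "rx-rtr" private flag above is driven through the standard ethtool
+ * interface while the interface is down (the mode cannot be changed on a
+ * running interface); the interface name is illustrative:
+ *
+ *	ethtool --show-priv-flags can0
+ *	ethtool --set-priv-flags can0 rx-rtr on
+ */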
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
new file mode 100644
index 000000000000..16692a2502eb
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * flexcan.h - FLEXCAN CAN controller driver
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ */
+
+#ifndef _FLEXCAN_H
+#define _FLEXCAN_H
+
+#include <linux/can/rx-offload.h>
+
+/* FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MCF5441X FlexCAN2 ? no yes no no no no 16
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
+ * VF610 FlexCAN3 ? no yes no yes yes? no 64
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+ /* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use mailboxes (not FIFO) for RX path */
+#define FLEXCAN_QUIRK_USE_RX_MAILBOX BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode with GPR to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+/* support memory detection and correction */
+#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+/* Setup 3 separate interrupts, main, boff and err */
+#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
+/* Setup 16 mailboxes */
+#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
+/* Device supports RX via mailboxes */
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
+/* Device supports RTR reception via mailboxes */
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
+/* Device supports RX via FIFO */
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* Setup stop mode with ATF SCMI protocol to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
+/* Device has two separate interrupt lines for two mailbox ranges, which
+ * both need to have an interrupt handler registered.
+ */
+#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
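+
+/* The quirks are plain bit flags; drivers test them with bitwise AND,
+ * e.g. (sketch):
+ *
+ *	if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD)
+ *		...
+ */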
+
+struct flexcan_devtype_data {
+ u32 quirks; /* quirks needed for different IP cores */
+};
+
+struct flexcan_stop_mode {
+ struct regmap *gpr;
+ u8 req_gpr;
+ u8 req_bit;
+};
+
+struct flexcan_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct device *dev;
+
+ struct flexcan_regs __iomem *regs;
+ struct flexcan_mb __iomem *tx_mb;
+ struct flexcan_mb __iomem *tx_mb_reserved;
+ u8 tx_mb_idx;
+ u8 mb_count;
+ u8 mb_size;
+ u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
+
+ u64 rx_mask;
+ u64 tx_mask;
+ u32 reg_ctrl_default;
+
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ struct flexcan_devtype_data devtype_data;
+ struct regulator *reg_xceiver;
+ struct phy *transceiver;
+ struct flexcan_stop_mode stm;
+
+ int irq_boff;
+ int irq_err;
+ int irq_secondary_mb;
+
+	/* IPC handle used when stop mode is set up via System Controller firmware (scfw) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
+ /* Read and Write APIs */
+ u32 (*read)(void __iomem *addr);
+ void (*write)(u32 val, void __iomem *addr);
+};
+
+extern const struct ethtool_ops flexcan_ethtool_ops;
+
+static inline bool
+flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
+}
+
+static inline bool
+flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
+}
+
+static inline bool
+flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
+}
+
+static inline bool
+flexcan_active_rx_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
+ return true;
+ } else {
+ /* RX-FIFO is always RTR capable */
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* _FLEXCAN_H */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 78e27940b2af..3b1b09943436 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -27,10 +27,12 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/dma-mapping.h>
@@ -241,13 +243,14 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
+ struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@@ -255,7 +258,6 @@ struct grcan_priv {
struct grcan_dma dma;
struct sk_buff **echo_skb; /* We allocate this on our own */
- u8 *txdlc; /* Length of queued frames */
/* The echo skb pointer, pointing into echo_skb and indicating which
* frames can be echoed back. See the "Notes on the tx cyclic buffer
@@ -515,9 +517,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
if (echo) {
/* Normal echo of messages */
stats->tx_packets++;
- stats->tx_bytes += priv->txdlc[i];
- priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, i, NULL);
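+			/* can_get_echo_skb() returns the echoed frame's
+			 * data length, which makes the former per-frame
+			 * txdlc bookkeeping redundant.
+			 */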
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i, NULL);
@@ -673,6 +673,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
/* There are no others at this point */
break;
}
+ cf.can_id |= CAN_ERR_CNT;
cf.data[6] = txerr;
cf.data[7] = rxerr;
priv->can.state = state;
@@ -777,7 +778,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
if (priv->need_txbug_workaround &&
(sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
/* Frame(s) received or transmitted */
@@ -805,7 +806,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
static void grcan_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, rr_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, rr_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -816,8 +817,8 @@ static void grcan_running_reset(struct timer_list *t)
spin_lock_irqsave(&priv->lock, flags);
priv->resetting = false;
- del_timer(&priv->hang_timer);
- del_timer(&priv->rr_timer);
+ timer_delete(&priv->hang_timer);
+ timer_delete(&priv->rr_timer);
if (!priv->closing) {
/* Save and reset - config register preserved by grcan_reset */
@@ -896,7 +897,7 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
/* Disable channels and schedule a running reset */
static void grcan_initiate_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, hang_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, hang_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -924,7 +925,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@@ -949,7 +950,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
- dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@@ -1062,25 +1063,20 @@ static int grcan_open(struct net_device *dev)
priv->can.echo_skb_max = dma->tx.size;
priv->can.echo_skb = priv->echo_skb;
- priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
- if (!priv->txdlc) {
- err = -ENOMEM;
- goto exit_free_echo_skb;
- }
-
/* Get can device up */
err = open_candev(dev);
if (err)
- goto exit_free_txdlc;
+ goto exit_free_echo_skb;
err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
dev->name, dev);
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
@@ -1093,8 +1089,6 @@ static int grcan_open(struct net_device *dev)
exit_close_candev:
close_candev(dev);
-exit_free_txdlc:
- kfree(priv->txdlc);
exit_free_echo_skb:
kfree(priv->echo_skb);
exit_free_dma_buffers:
@@ -1113,8 +1107,10 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
- del_timer_sync(&priv->hang_timer);
- del_timer_sync(&priv->rr_timer);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ timer_delete_sync(&priv->hang_timer);
+ timer_delete_sync(&priv->rr_timer);
+ spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
@@ -1129,12 +1125,11 @@ static int grcan_close(struct net_device *dev)
priv->can.echo_skb_max = 0;
priv->can.echo_skb = NULL;
kfree(priv->echo_skb);
- kfree(priv->txdlc);
return 0;
}
-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -1142,7 +1137,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
- work_done = catch_up_echo_skb(dev, budget, true);
+ work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@@ -1152,12 +1147,10 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
* so prevent a running reset while catching up
*/
if (priv->need_txbug_workaround)
- del_timer(&priv->hang_timer);
+ timer_delete(&priv->hang_timer);
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@@ -1211,11 +1204,11 @@ static int grcan_receive(struct net_device *dev, int budget)
shift = GRCAN_MSG_DATA_SHIFT(i);
cf->data[i] = (u8)(slot[j] >> shift);
}
- }
- /* Update statistics and read pointer */
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_receive_skb(skb);
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
@@ -1239,19 +1232,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
- int tx_work_done, rx_work_done;
- int rx_budget = budget / 2;
- int tx_budget = budget - rx_budget;
+ int work_done;
- /* Half of the budget for receiving messages */
- rx_work_done = grcan_receive(dev, rx_budget);
+ work_done = grcan_receive(dev, budget);
- /* Half of the budget for transmitting messages as that can trigger echo
- * frames being received
- */
- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+ grcan_transmit_catch_up(dev);
- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@@ -1268,7 +1255,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
- return rx_work_done + tx_work_done;
+ return work_done;
}
/* Work around the tx bug by waiting for the risky situation to clear. If that fails,
@@ -1360,7 +1347,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
unsigned long flags;
u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
/* Trying to transmit in silent mode will generate error interrupts, but
@@ -1447,7 +1434,6 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* can_put_echo_skb would be an error unless other measures are
* taken.
*/
- priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
@@ -1575,7 +1561,10 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_open = grcan_open,
.ndo_stop = grcan_close,
.ndo_start_xmit = grcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops grcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int grcan_setup_netdev(struct platform_device *ofdev,
@@ -1594,12 +1583,14 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
dev->irq = irq;
dev->flags |= IFF_ECHO;
dev->netdev_ops = &grcan_netdev_ops;
+ dev->ethtool_ops = &grcan_ethtool_ops;
dev->sysfs_groups[0] = &sysfs_grcan_group;
priv = netdev_priv(dev);
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
+ priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@@ -1626,7 +1617,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
}
- netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
SET_NETDEV_DEV(dev, &ofdev->dev);
dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
@@ -1652,6 +1643,7 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1660,10 +1652,14 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
- err = of_property_read_u32(np, "systemid", &sysid);
- if (!err && ((sysid & GRLIB_VERSION_MASK)
- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
- txbug = false;
+ sysid_parent = of_find_node_by_path("/ambapp0");
+ if (sysid_parent) {
+ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
+ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+ of_node_put(sysid_parent);
+ }
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {
@@ -1701,7 +1697,7 @@ exit_error:
return err;
}
-static int grcan_remove(struct platform_device *ofdev)
+static void grcan_remove(struct platform_device *ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct grcan_priv *priv = netdev_priv(dev);
@@ -1711,8 +1707,6 @@ static int grcan_remove(struct platform_device *ofdev)
irq_dispose_mapping(dev->irq);
netif_napi_del(&priv->napi);
free_candev(dev);
-
- return 0;
}
static const struct of_device_id grcan_match[] = {
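
The grcan_poll() rework above hands the whole NAPI budget to RX and leaves TX completion unbudgeted. The resulting poll shape, sketched with illustrative names (example_receive() and example_tx_catch_up() stand in for the driver's own functions):

    static int example_poll(struct napi_struct *napi, int budget)
    {
    	struct net_device *dev = napi->dev;
    	int work_done;

    	work_done = example_receive(dev, budget);	/* RX, bounded by budget */
    	example_tx_catch_up(dev);			/* TX completion, unbudgeted */

    	if (work_done < budget)
    		napi_complete(napi);

    	return work_done;
    }
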
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 5bb957a26bc6..0f83335e4d07 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -13,13 +13,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/can/dev.h>
@@ -309,15 +309,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev)
*(u32 *)(cf->data + i) =
readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
}
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
/* Remove the packet from FIFO */
writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_receive_skb(skb);
}
@@ -345,9 +345,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
}
- if (pkts)
- can_led_event(ndev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -393,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
return 0;
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
/* Propagate the error condition to the CAN stack. */
skb = alloc_can_err_skb(ndev, &cf);
- if (unlikely(!skb))
- return 0;
/* Read the error counter register and check for new errors. */
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) {
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
- if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) {
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
/* Reset the error counter, ack the IRQ and re-enable the counter. */
writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
@@ -430,8 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (unlikely(!skb))
+ return 0;
+
netif_receive_skb(skb);
return 1;
@@ -456,7 +473,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
enum can_state new_state)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -498,7 +514,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -507,7 +523,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
@@ -522,8 +538,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -631,7 +645,6 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
if (isr & tx_irq_mask)
@@ -656,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
@@ -835,7 +848,6 @@ static int ifi_canfd_open(struct net_device *ndev)
ifi_canfd_start(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -858,8 +870,6 @@ static int ifi_canfd_close(struct net_device *ndev)
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -871,7 +881,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
u32 txst, txid, txdlc;
int i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
/* Check if the TX buffer is full */
@@ -934,7 +944,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_open = ifi_canfd_open,
.ndo_stop = ifi_canfd_close,
.ndo_start_xmit = ifi_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ifi_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int ifi_canfd_plat_probe(struct platform_device *pdev)
@@ -974,21 +987,22 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
+ ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
- priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.do_set_mode = ifi_canfd_set_mode;
- priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
@@ -1009,8 +1023,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
goto err_reg;
}
- devm_can_led_init(ndev);
-
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
priv->base, ndev->irq, priv->can.clock.freq);
@@ -1021,15 +1033,13 @@ err_reg:
return ret;
}
-static int ifi_canfd_plat_remove(struct platform_device *pdev)
+static void ifi_canfd_plat_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
unregister_candev(ndev);
platform_set_drvdata(pdev, NULL);
free_candev(ndev);
-
- return 0;
}
static const struct of_device_id ifi_canfd_of_table[] = {
@@ -1044,7 +1054,7 @@ static struct platform_driver ifi_canfd_plat_driver = {
.of_match_table = ifi_canfd_of_table,
},
.probe = ifi_canfd_plat_probe,
- .remove = ifi_canfd_plat_remove,
+ .remove = ifi_canfd_plat_remove,
};
module_platform_driver(ifi_canfd_plat_driver);
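
The minimal ethtool_ops added above recurs across the drivers in this series: the stock ethtool_op_get_ts_info() helper reports software-only timestamping capabilities. The pattern as it appears here:

    static const struct ethtool_ops example_ethtool_ops = {
    	.get_ts_info = ethtool_op_get_ts_info,	/* software timestamping only */
    };

    /* wired up in probe: */
    ndev->ethtool_ops = &example_ethtool_ops;
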
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32006dbf5abd..1efdd1fd8caa 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
@@ -1127,7 +1128,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
mod->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
switch (ecc & ECC_MASK) {
case ECC_BIT:
@@ -1153,7 +1154,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (state == CAN_STATE_ERROR_WARNING) {
mod->can.can_stats.error_warning++;
cf->data[1] = (txerr > rxerr) ?
@@ -1277,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
if (!skb)
return;
+ skb_tx_timestamp(skb);
+
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
}
@@ -1285,7 +1288,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
{
struct sk_buff *skb = skb_dequeue(&mod->echoq);
struct can_frame *cf;
- u8 dlc;
+ u8 dlc = 0;
/* this should never trigger unless there is a driver bug */
if (!skb) {
@@ -1294,7 +1297,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
}
cf = (struct can_frame *)skb->data;
- dlc = cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ dlc = cf->len;
/* check flag whether this packet has to be looped back */
if (skb->pkt_type != PACKET_LOOPBACK) {
@@ -1421,7 +1425,8 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* update statistics, receive the skb */
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
err_noalloc:
@@ -1688,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
void __iomem *desc_addr;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
spin_lock_irqsave(&mod->lock, flags);
@@ -1747,7 +1752,10 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_open = ican3_open,
.ndo_stop = ican3_stop,
.ndo_start_xmit = ican3_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ican3_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
/*
@@ -1858,7 +1866,7 @@ static ssize_t fwinfo_show(struct device *dev,
{
struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
- return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+ return sysfs_emit(buf, "%s\n", mod->fwinfo);
}
static DEVICE_ATTR_RW(termination);
@@ -1908,7 +1916,7 @@ static int ican3_probe(struct platform_device *pdev)
mod = netdev_priv(ndev);
mod->ndev = ndev;
mod->num = pdata->modno;
- netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
@@ -1921,6 +1929,7 @@ static int ican3_probe(struct platform_device *pdev)
mod->free_page = DPM_FREE_START;
ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->ethtool_ops = &ican3_ethtool_ops;
ndev->flags |= IFF_ECHO;
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2013,7 +2022,7 @@ out_return:
return ret;
}
-static int ican3_remove(struct platform_device *pdev)
+static void ican3_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ican3_dev *mod = netdev_priv(ndev);
@@ -2032,8 +2041,6 @@ static int ican3_remove(struct platform_device *pdev)
iounmap(mod->dpm);
free_candev(ndev);
-
- return 0;
}
static struct platform_driver ican3_driver = {
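
The RTR accounting fix above appears in both the echo and the receive path: remote frames carry a DLC but no payload, so their length must not be added to the byte counters. The guard, as used in this patch:

    /* Count payload bytes for data frames only; RTR frames carry none */
    if (!(cf->can_id & CAN_RTR_FLAG))
    	stats->rx_bytes += cf->len;
    stats->rx_packets++;
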
diff --git a/drivers/net/can/kvaser_pciefd/Makefile b/drivers/net/can/kvaser_pciefd/Makefile
new file mode 100644
index 000000000000..8c5b8cdc6b5f
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
+kvaser_pciefd-y = kvaser_pciefd_core.o kvaser_pciefd_devlink.o
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
new file mode 100644
index 000000000000..08c9ddc1ee85
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* kvaser_pciefd common definitions and declarations
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+
+#ifndef _KVASER_PCIEFD_H
+#define _KVASER_PCIEFD_H
+
+#include <linux/can/dev.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
+#define KVASER_PCIEFD_DMA_COUNT 2U
+#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_address_offset {
+ u32 serdes;
+ u32 pci_ien;
+ u32 pci_irq;
+ u32 sysid;
+ u32 loopback;
+ u32 kcan_srb_fifo;
+ u32 kcan_srb;
+ u32 kcan_ch0;
+ u32 kcan_ch1;
+};
+
+struct kvaser_pciefd_irq_mask {
+ u32 kcan_rx0;
+ u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ u32 all;
+};
+
+struct kvaser_pciefd_dev_ops {
+ void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+};
+
+struct kvaser_pciefd_driver_data {
+ const struct kvaser_pciefd_address_offset *address_offset;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
+ const struct kvaser_pciefd_dev_ops *ops;
+};
+
+struct kvaser_pciefd_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct devlink_port devlink_port;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u32 ioc;
+ u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
+ int err_rep_cnt;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ const struct kvaser_pciefd_driver_data *driver_data;
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 bus_freq;
+ u32 freq;
+ u32 freq_to_ticks_div;
+ struct kvaser_pciefd_fw_version fw_version;
+};
+
+extern const struct devlink_ops kvaser_pciefd_devlink_ops;
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can);
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can);
+#endif /* _KVASER_PCIEFD_H */
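
The driver_data/ops indirection declared above lets a single probe path serve the Altera, SmartFusion2 and Xilinx based boards. A hedged sketch of a consumer (the wrapper is illustrative; the member names match the header):

    static void example_write_dma_map(struct kvaser_pciefd *pcie,
    				      dma_addr_t addr, int index)
    {
    	/* Dispatch to the variant-specific SerDes programming routine */
    	pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, addr, index);
    }
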
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
index 74d9899fc904..d8c9bfb20230 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c
@@ -1,20 +1,24 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
* Parts of this driver are based on the following:
- * - Kvaser linux pciefd driver (version 5.25)
+ * - Kvaser linux pciefd driver (version 5.42)
* - PEAK linux canfd driver
- * - Altera Avalon EPCS flash controller driver
*/
+#include "kvaser_pciefd.h"
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/device.h>
+#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <linux/can/dev.h>
#include <linux/timer.h>
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/iopoll.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -24,255 +28,354 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
-#define KVASER_PCIEFD_MAX_ERR_REP 256
-#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
-#define KVASER_PCIEFD_DMA_COUNT 2
-
-#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
-#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
+#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_VENDOR 0x1a07
-#define KVASER_PCIEFD_4HS_ID 0x0d
-#define KVASER_PCIEFD_2HS_ID 0x0e
-#define KVASER_PCIEFD_HS_ID 0x0f
-#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
-#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
-
-/* PCIe IRQ registers */
-#define KVASER_PCIEFD_IRQ_REG 0x40
-#define KVASER_PCIEFD_IEN_REG 0x50
-/* DMA map */
-#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
+
+/* Altera based devices */
+#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
+#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
+#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
+#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
+#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011
+
+/* SmartFusion2 based devices */
+#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
+#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
+#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
+#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
+#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
+/* Altera SerDes Enable 64-bit DMA address translation */
+#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
+
+/* SmartFusion2 SerDes LSB address translation mask */
+#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
-#define KVASER_PCIEFD_KCAN0_BASE 0x10000
-#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IOC_REG 0x404
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
-#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
+#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
-/* Loopback control register */
-#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
-#define KVASER_PCIEFD_SYSID_BASE 0x1f020
-#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
-#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
-#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
-#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
+#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
+#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
+#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
+#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
+/* Shared receive buffer FIFO registers */
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
/* Shared receive buffer registers */
-#define KVASER_PCIEFD_SRB_BASE 0x1f200
-#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
-#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
-#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
-#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
-#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
-/* EPCS flash controller registers */
-#define KVASER_PCIEFD_SPI_BASE 0x1fc00
-#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
-#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
-#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
-#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
-#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
-
-#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
-#define KVASER_PCIEFD_IRQ_SRB BIT(4)
-
-#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
-#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
-#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
+#define KVASER_PCIEFD_SRB_CMD_REG 0x0
+#define KVASER_PCIEFD_SRB_IEN_REG 0x04
+#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
+#define KVASER_PCIEFD_SRB_STAT_REG 0x10
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
+#define KVASER_PCIEFD_SRB_CTRL_REG 0x18
+
+/* System build information fields */
+#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
+#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
+#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
+#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)
/* Reset DMA buffer 0, 1 and FIFO offset */
-#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
+#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
-/* DMA packet done, buffer 0 and 1 */
-#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
-#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
-/* DMA overflow, buffer 0 and 1 */
-#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
-#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
-#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
+#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
+/* DMA overflow, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
+#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
+/* DMA packet done, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
+#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
+/* Device has DMA support */
+#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
-/* DMA support */
-#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+
+/* SRB current packet level */
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)
/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
-/* EPCS flash controller definitions */
-#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
-#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
-#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
-#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
-#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
-#define KVASER_PCIEFD_CFG_SYS_VER 1
-#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
-#define KVASER_PCIEFD_SPI_TMT BIT(5)
-#define KVASER_PCIEFD_SPI_TRDY BIT(6)
-#define KVASER_PCIEFD_SPI_RRDY BIT(7)
-#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
-/* Commands for controlling the onboard flash */
-#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
-#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
-#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
-
-/* Kvaser KCAN definitions */
-#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
-#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
-
-#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
-/* Request status packet */
-#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* KCAN CTRL packet types */
+#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
+#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
+#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5
+
+/* Command sequence number */
+#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
+/* Command bits */
+#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
+/* Request status packet */
+#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+
+/* Control CAN LED, active low */
+#define KVASER_PCIEFD_KCAN_IOC_LED BIT(0)
-/* Tx FIFO unaligned read */
-#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
-/* Tx FIFO unaligned end */
-#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
-/* Bus parameter protection error */
-#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
-/* FDF bit when controller is in classic mode */
-#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
-/* Rx FIFO overflow */
-#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
-/* Abort done */
-#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
-/* Tx buffer flush done */
-#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
-/* Tx FIFO overflow */
-#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
-/* Tx FIFO empty */
-#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
+/* Tx FIFO empty */
+#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
+/* Tx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
+/* Tx buffer flush done */
+#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
+/* Abort done */
+#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
+/* Rx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
+/* FDF bit when controller is in classic CAN mode */
+#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
+/* Bus parameter protection error */
+#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
+/* Tx FIFO unaligned end */
+#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
+/* Tx FIFO unaligned read */
+#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
-#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
+/* Tx FIFO size */
+#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
+/* Tx FIFO current packet level */
+#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)
-#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
-/* Abort request */
-#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
-/* Idle state. Controller in reset mode and no abort or flush pending */
-#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
-/* Bus off */
-#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
-/* Reset mode request */
-#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
-/* Controller in reset mode */
-#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
-/* Controller got one-shot capability */
-#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
+/* Current status packet sequence number */
+#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
-#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
- KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
- KVASER_PCIEFD_KCAN_STAT_IRM)
+/* Controller got one-shot capability */
+#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
+/* Controller in reset mode */
+#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
+/* Reset mode request */
+#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
+/* Bus off */
+#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
+/* Idle state. Controller in reset mode and no abort or flush pending */
+#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
+/* Abort request */
+#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
+/* Controller is bus off */
+#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
+ (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
+ KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)
-/* Reset mode */
-#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
-/* Listen only mode */
-#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
-/* Error packet enable */
-#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
-/* CAN FD non-ISO */
-#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
-/* Acknowledgment packet type */
-#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
-/* Active error flag enable. Clear to force error passive */
-#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
+/* Active error flag enable. Clear to force error passive */
+#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
+/* Acknowledgment packet type */
+#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
+/* CAN FD non-ISO */
+#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
+/* Error packet enable */
+#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
+/* Listen only mode */
+#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
+/* Reset mode */
+#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
-#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
-#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
-#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
-
-#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
-
-/* Kvaser KCAN packet types */
-#define KVASER_PCIEFD_PACK_TYPE_DATA 0
-#define KVASER_PCIEFD_PACK_TYPE_ACK 1
-#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
-#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
-#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
-#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
-#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
-#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
-#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
-
-/* Kvaser KCAN packet common definitions */
-#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
-#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
-#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
-
-/* Kvaser KCAN TDATA and RDATA first word */
+/* BTRN and BTRD fields */
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
+#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
+#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)
+
+/* PWM Control fields */
+#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
+#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)
+
+/* KCAN packet type IDs */
+#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
+#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
+#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
+#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
+#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
+#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
+#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
+#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
+#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9
+
+/* Common KCAN packet definitions, second word */
+#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
+#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
+#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)
+
+/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
-/* Kvaser KCAN TDATA and RDATA second word */
-#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
-#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
-#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
-#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
-/* Kvaser KCAN TDATA second word */
-#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
+#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
+/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
+#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
+/* KCAN Transmit/Receive data packet, second word */
+#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
+#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
+#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
+#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)
-/* Kvaser KCAN APACKET */
-#define KVASER_PCIEFD_APACKET_FLU BIT(8)
-#define KVASER_PCIEFD_APACKET_CT BIT(9)
-#define KVASER_PCIEFD_APACKET_ABL BIT(10)
+/* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_NACK BIT(11)
+#define KVASER_PCIEFD_APACKET_ABL BIT(10)
+#define KVASER_PCIEFD_APACKET_CT BIT(9)
+#define KVASER_PCIEFD_APACKET_FLU BIT(8)
-/* Kvaser KCAN SPACK first word */
-#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
-#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
-#define KVASER_PCIEFD_SPACK_IDET BIT(20)
-#define KVASER_PCIEFD_SPACK_IRM BIT(21)
+/* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
-/* Kvaser KCAN SPACK second word */
-#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
-#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
+#define KVASER_PCIEFD_SPACK_IRM BIT(21)
+#define KVASER_PCIEFD_SPACK_IDET BIT(20)
+#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
+#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
+#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
+/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
+#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
+
+/* KCAN Error detected packet, second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
+/* Macros for calculating addresses of registers */
+#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
+ ((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
+#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
+#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
+#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
+#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
+#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
+#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
+#define KVASER_PCIEFD_SRB_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
+#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
+#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
+ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
+#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
+ (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
+#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
+ (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))
struct kvaser_pciefd;
+static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
+
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
+ .serdes = 0x1000,
+ .pci_ien = 0x50,
+ .pci_irq = 0x40,
+ .sysid = 0x1f020,
+ .loopback = 0x1f000,
+ .kcan_srb_fifo = 0x1f200,
+ .kcan_srb = 0x1f400,
+ .kcan_ch0 = 0x10000,
+ .kcan_ch1 = 0x11000,
+};
-struct kvaser_pciefd_can {
- struct can_priv can;
- struct kvaser_pciefd *kv_pcie;
- void __iomem *reg_base;
- struct can_berr_counter bec;
- u8 cmd_seq;
- int err_rep_cnt;
- int echo_idx;
- spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
- struct timer_list bec_poll_timer;
- struct completion start_comp, flush_comp;
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
+ .serdes = 0x280c8,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+ .serdes = 0x00208,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
};
-struct kvaser_pciefd {
- struct pci_dev *pci;
- void __iomem *reg_base;
- struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
- void *dma_data[KVASER_PCIEFD_DMA_COUNT];
- u8 nr_channels;
- u32 bus_freq;
- u32 freq;
- u32 freq_to_ticks_div;
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
+ .all = GENMASK(4, 0),
+};
+
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
+ .all = GENMASK(23, 16) | BIT(4),
+};
+
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
+};
+
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
+};
+
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
+ .address_offset = &kvaser_pciefd_altera_address_offset,
+ .irq_mask = &kvaser_pciefd_altera_irq_mask,
+ .ops = &kvaser_pciefd_altera_dev_ops,
+};
+
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
+ .address_offset = &kvaser_pciefd_sf2_address_offset,
+ .irq_mask = &kvaser_pciefd_sf2_irq_mask,
+ .ops = &kvaser_pciefd_sf2_dev_ops,
+};
+
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+ .address_offset = &kvaser_pciefd_xilinx_address_offset,
+ .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+ .ops = &kvaser_pciefd_xilinx_dev_ops,
};
struct kvaser_pciefd_rx_packet {
@@ -297,198 +400,86 @@ static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
.brp_inc = 1,
};
-struct kvaser_pciefd_cfg_param {
- __le32 magic;
- __le32 nr;
- __le32 len;
- u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
-};
-
-struct kvaser_pciefd_cfg_img {
- __le32 version;
- __le32 magic;
- __le32 crc;
- struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
-};
-
static struct pci_device_id kvaser_pciefd_id_table[] = {
- { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
- { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
- { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
- { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
- { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
- { 0,},
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ 0,
+ },
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
-/* Onboard flash memory functions */
-static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
-{
- u32 res;
- int ret;
-
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
-
- return ret;
-}
-
-static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
- u32 tx_len, u8 *rx, u32 rx_len)
+static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
- int c;
-
- iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
- iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
- ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
-
- c = tx_len;
- while (c--) {
- if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
- return -EIO;
-
- iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
-
- if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
- return -EIO;
-
- ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
- }
-
- c = rx_len;
- while (c-- > 0) {
- if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
- return -EIO;
-
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
-
- if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
- return -EIO;
-
- *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
- }
-
- if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
- return -EIO;
-
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
-
- if (c != -1) {
- dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
- struct kvaser_pciefd_cfg_img *img)
-{
- int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
- int res, crc;
- u8 *crc_buff;
-
- u8 cmd[] = {
- KVASER_PCIEFD_FLASH_READ_CMD,
- (u8)((offset >> 16) & 0xff),
- (u8)((offset >> 8) & 0xff),
- (u8)(offset & 0xff)
- };
-
- res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
- KVASER_PCIEFD_CFG_IMG_SZ);
- if (res)
- return res;
-
- crc_buff = (u8 *)img->params;
-
- if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
- dev_err(&pcie->pci->dev,
- "Config flash corrupted, version number is wrong\n");
- return -ENODEV;
- }
-
- if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
- dev_err(&pcie->pci->dev,
- "Config flash corrupted, magic number is wrong\n");
- return -ENODEV;
- }
-
- crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
- if (le32_to_cpu(img->crc) != crc) {
- dev_err(&pcie->pci->dev,
- "Stored CRC does not match flash image contents\n");
- return -EIO;
- }
-
- return 0;
+ iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
+ FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
+ can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}
-static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
- struct kvaser_pciefd_cfg_img *img)
+static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
- struct kvaser_pciefd_cfg_param *param;
-
- param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
- memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
+ kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}
-static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
+static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
- int res;
- struct kvaser_pciefd_cfg_img *img;
-
- /* Read electronic signature */
- u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
-
- res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
- if (res)
- return -EIO;
-
- img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
- if (!img)
- return -ENOMEM;
-
- if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
- dev_err(&pcie->pci->dev,
- "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
- cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
-
- res = -ENODEV;
- goto image_free;
- }
-
- cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
- res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
- if (res) {
- goto image_free;
- } else if (cmd[0] & 1) {
- res = -EIO;
- /* No write is ever done, the WIP should never be set */
- dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
- goto image_free;
- }
-
- res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
- if (res) {
- res = -EIO;
- goto image_free;
- }
-
- kvaser_pciefd_cfg_read_params(pcie, img);
-
-image_free:
- kfree(img);
- return res;
+ kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}
-static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_led(struct kvaser_pciefd_can *can, bool on)
{
- u32 cmd;
+ if (on)
+ can->ioc &= ~KVASER_PCIEFD_KCAN_IOC_LED;
+ else
+ can->ioc |= KVASER_PCIEFD_KCAN_IOC_LED;
- cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
- cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
- iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+ iowrite32(can->ioc, can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
}
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
@@ -517,7 +508,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
spin_unlock_irqrestore(&can->lock, irq);
}
-static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
u32 msk;
@@ -525,11 +516,16 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
- KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+ KVASER_PCIEFD_KCAN_IRQ_TAR;
iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
- return 0;
+static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
+ struct sk_buff *skb, u64 timestamp)
+{
+ skb_hwtstamps(skb)->hwtstamp =
+ ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}
static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
@@ -538,7 +534,6 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
unsigned long irq;
spin_lock_irqsave(&can->lock, irq);
-
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
@@ -553,7 +548,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
-
+ else
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
/* Use ACK packet type */
@@ -570,18 +566,13 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
unsigned long irq;
spin_lock_irqsave(&can->lock, irq);
- iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
-
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
- u32 cmd;
-
/* If controller is already idle, run abort, flush and reset */
- cmd = KVASER_PCIEFD_KCAN_CMD_AT;
- cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
- iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+ kvaser_pciefd_abort_flush_reset(can);
} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
u32 mode;
@@ -590,7 +581,6 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
mode |= KVASER_PCIEFD_KCAN_MODE_RM;
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
}
-
spin_unlock_irqrestore(&can->lock, irq);
}
@@ -599,8 +589,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
u32 mode;
unsigned long irq;
- del_timer(&can->bec_poll_timer);
-
+ timer_delete(&can->bec_poll_timer);
if (!completion_done(&can->flush_comp))
kvaser_pciefd_start_controller_flush(can);
@@ -612,11 +601,9 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq);
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
-
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
-
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@@ -629,11 +616,10 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
}
/* Reset interrupt handling */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
kvaser_pciefd_set_tx_irq(can);
kvaser_pciefd_setup_controller(can);
-
can->can.state = CAN_STATE_ERROR_ACTIVE;
netif_wake_queue(can->can.dev);
can->bec.txerr = 0;
@@ -651,10 +637,9 @@ static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
spin_lock_irqsave(&can->lock, irq);
pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
- top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
-
+ top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
/* Set duty cycle to zero */
- pwm_ctrl |= top;
+ pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
spin_unlock_irqrestore(&can->lock, irq);
}
@@ -667,35 +652,37 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
kvaser_pciefd_pwm_stop(can);
spin_lock_irqsave(&can->lock, irq);
-
- /* Set frequency to 500 KHz*/
+	/* Set frequency to 500 kHz */
top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
- pwm_ctrl = top & 0xff;
- pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
+ pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	/* Set duty cycle to 95 % */
trigger = (100 * top - 95 * (top + 1) + 50) / 100;
- pwm_ctrl = trigger & 0xff;
- pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
+ pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
spin_unlock_irqrestore(&can->lock, irq);
}
static int kvaser_pciefd_open(struct net_device *netdev)
{
- int err;
+ int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- err = open_candev(netdev);
- if (err)
- return err;
+ can->tx_idx = 0;
+ can->ack_idx = 0;
- err = kvaser_pciefd_bus_on(can);
- if (err) {
+ ret = open_candev(netdev);
+ if (ret)
+ return ret;
+
+ ret = kvaser_pciefd_bus_on(can);
+ if (ret) {
close_candev(netdev);
- return err;
+ return ret;
}
return 0;
@@ -716,24 +703,29 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
ret = -ETIMEDOUT;
} else {
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- del_timer(&can->bec_poll_timer);
+ timer_delete(&can->bec_poll_timer);
}
+ can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
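/*
 * Note: tx_idx and ack_idx are free-running counters that are never
 * reduced modulo the ring size; unsigned subtraction gives the in-flight
 * count correctly even across the 2^32 wrap. Illustration with made-up
 * values (a tx_max_count of 17 is assumed):
 */
static unsigned int example_tx_avail(void)
{
	unsigned int tx_idx = 0x00000002;	/* producer, wrapped */
	unsigned int ack_idx = 0xfffffffe;	/* consumer, not yet */

	/* 0x00000002 - 0xfffffffe == 4 packets in flight */
	return 17 - (tx_idx - ack_idx);		/* 13 slots available */
}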
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
-
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -742,19 +734,24 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
if (cf->can_id & CAN_EFF_FLAG)
p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
- p->header[0] |= cf->can_id & CAN_EFF_MASK;
- p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
+ p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
if (can_is_canfd_skb(skb)) {
+ p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
+ can_fd_len2dlc(cf->len));
p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
if (cf->flags & CANFD_BRS)
p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
if (cf->flags & CANFD_ESI)
p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
+ } else {
+ p->header[1] |=
+ FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
- p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
+ p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
packet_size = cf->len;
memcpy(p->data, cf->data, packet_size);
@@ -766,23 +763,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
- int nwords;
- u8 count;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
+ int nr_words;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
-
- spin_lock_irqsave(&can->echo_lock, irq_flags);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -790,13 +788,13 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
iowrite32(packet.header[1],
can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
- if (nwords) {
- u32 data_last = ((u32 *)packet.data)[nwords - 1];
+ if (nr_words) {
+ u32 data_last = ((u32 *)packet.data)[nr_words - 1];
/* Write data to fifo, except last word */
iowrite32_rep(can->reg_base +
KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
- nwords - 1);
+ nr_words - 1);
/* Write last word to end of fifo */
__raw_writel(data_last, can->reg_base +
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
@@ -806,15 +804,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
- can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
-
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
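/*
 * Note: the mask (echo_skb_max - 1) used for seq above is only a valid
 * modulo when echo_skb_max is a power of two; that is why the setup code
 * below passes roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT) to
 * alloc_candev(). Sketch, with an assumed slot count of 32:
 */
static unsigned int example_echo_slot(unsigned int tx_idx)
{
	const unsigned int echo_skb_max = 32;	/* roundup_pow_of_two(17) */

	/* tx_idx = 35 -> 35 & 31 == 3, same as 35 % 32 but branch-free */
	return tx_idx & (echo_skb_max - 1);
}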
@@ -827,29 +817,24 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
struct can_bittiming *bt;
if (data)
- bt = &can->can.data_bittiming;
+ bt = &can->can.fd.data_bittiming;
else
bt = &can->can.bittiming;
- btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
- KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
- (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
- KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
- ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
- ((bt->brp - 1) & 0x1fff);
+ btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
+ FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
+ FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
+ FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);
spin_lock_irqsave(&can->lock, irq_flags);
mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
-
/* Put the circuit in reset mode */
iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
/* Can only set bittiming if in reset mode */
ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
- test, test & KVASER_PCIEFD_KCAN_MODE_RM,
- 0, 10);
-
+ test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
if (ret) {
spin_unlock_irqrestore(&can->lock, irq_flags);
return -EBUSY;
@@ -859,11 +844,10 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
else
iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
-
/* Restore previous reset mode status */
iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
-
spin_unlock_irqrestore(&can->lock, irq_flags);
+
return 0;
}
@@ -901,12 +885,14 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
bec->rxerr = can->bec.rxerr;
bec->txerr = can->bec.txerr;
+
return 0;
}
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
- struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+ struct kvaser_pciefd_can *can = timer_container_of(can, data,
+ bec_poll_timer);
kvaser_pciefd_enable_err_gen(can);
kvaser_pciefd_request_status(can);
@@ -917,7 +903,36 @@ static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
+};
+
+static int kvaser_pciefd_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ kvaser_pciefd_set_led(can, true);
+ return 0;
+
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ kvaser_pciefd_set_led(can, false);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_pciefd_set_phys_id,
};
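/*
 * Note: returning 3 for ETHTOOL_ID_ACTIVE delegates the blinking to the
 * ethtool core, which then alternates ETHTOOL_ID_ON and ETHTOOL_ID_OFF
 * callbacks at three cycles per second until the user interrupts
 * "ethtool -p". A simplified sketch of that driving loop (not the
 * actual net/ethtool implementation):
 */
static void example_phys_id_loop(struct net_device *dev, unsigned int seconds)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	int i, cycles = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);

	if (cycles < 1)
		return;
	for (i = 0; i < (int)(seconds * cycles); i++) {
		ops->set_phys_id(dev, ETHTOOL_ID_ON);
		msleep(500 / cycles);
		ops->set_phys_id(dev, ETHTOOL_ID_OFF);
		msleep(500 / cycles);
	}
	ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
}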
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
@@ -927,61 +942,56 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
for (i = 0; i < pcie->nr_channels; i++) {
struct net_device *netdev;
struct kvaser_pciefd_can *can;
- u32 status, tx_npackets;
+ u32 status, tx_nr_packets_max;
+ int ret;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
- KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
if (!netdev)
return -ENOMEM;
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
- can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
- i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
-
+ netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
+ can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
+ can->can.dev->dev_port = i;
init_completion(&can->start_comp);
init_completion(&can->flush_comp);
- timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
- 0);
+ timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);
/* Disable Bus load reporting */
iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
- tx_npackets = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
- if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
- 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
- dev_err(&pcie->pci->dev,
- "Max Tx count is smaller than expected\n");
+ can->ioc = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IOC_REG);
+ kvaser_pciefd_set_led(can, false);
- free_candev(netdev);
- return -ENODEV;
- }
+ tx_nr_packets_max =
+ FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
+ ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
spin_lock_init(&can->lock);
- can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
- can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
- can->can.do_set_data_bittiming =
- kvaser_pciefd_set_data_bittiming;
-
+ can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
-
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
- CAN_CTRLMODE_FD_NON_ISO;
+ CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -996,16 +1006,19 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
netdev->flags |= IFF_ECHO;
-
SET_NETDEV_DEV(netdev, &pcie->pci->dev);
- iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
- KVASER_PCIEFD_KCAN_IRQ_TFD,
+ iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
pcie->can[i] = can;
kvaser_pciefd_pwm_start(can);
+ ret = kvaser_pciefd_devlink_port_register(can);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to register devlink port\n");
+ return ret;
+ }
}
return 0;
@@ -1016,69 +1029,111 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
int i;
for (i = 0; i < pcie->nr_channels; i++) {
- int err = register_candev(pcie->can[i]->can.dev);
+ int ret = register_candev(pcie->can[i]->can.dev);
- if (err) {
+ if (ret) {
int j;
/* Unregister all successfully registered devices. */
for (j = 0; j < i; j++)
unregister_candev(pcie->can[j]->can.dev);
- return err;
+ return ret;
}
}
return 0;
}
-static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
- dma_addr_t addr, int offset)
+static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
{
+ void __iomem *serdes_base;
u32 word1, word2;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
- word2 = addr >> 32;
-#else
- word1 = addr;
- word2 = 0;
-#endif
- iowrite32(word1, pcie->reg_base + offset);
- iowrite32(word2, pcie->reg_base + offset + 4);
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
+ word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
+ word2 = upper_32_bits(addr);
+ } else {
+ word1 = addr;
+ word2 = 0;
+ }
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(word1, serdes_base);
+ iowrite32(word2, serdes_base + 0x4);
+}
+
+static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
+ iowrite32(lsb, serdes_base);
+ iowrite32(msb, serdes_base + 0x4);
+}
+
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ msb = upper_32_bits(addr);
+
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(msb, serdes_base);
+ iowrite32(lsb, serdes_base + 0x4);
}
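/*
 * Note: the Altera, SmartFusion2 and Xilinx variants above are selected
 * per device through a function pointer in the driver data, as the call
 * in kvaser_pciefd_setup_dma() below shows. Assumed shape of that ops
 * structure (the real definition lives in kvaser_pciefd.h):
 */
struct example_pciefd_dev_ops {
	void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
};

static const struct example_pciefd_dev_ops example_altera_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};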
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
u32 srb_status;
+ u32 srb_packet_count;
dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
/* Disable the DMA */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
- for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
- unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- pcie->dma_data[i] =
- dmam_alloc_coherent(&pcie->pci->dev,
- KVASER_PCIEFD_DMA_SIZE,
- &dma_addr[i],
- GFP_KERNEL);
+ dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
+ for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
+ pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
+ KVASER_PCIEFD_DMA_SIZE,
+ &dma_addr[i],
+ GFP_KERNEL);
if (!pcie->dma_data[i] || !dma_addr[i]) {
dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
KVASER_PCIEFD_DMA_SIZE);
return -ENOMEM;
}
-
- kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
+ pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
}
/* Reset Rx FIFO, and both DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ /* Empty Rx FIFO */
+ srb_packet_count =
+ FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
+ ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
+ KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
+ while (srb_packet_count) {
+ /* Drop current packet in FIFO */
+ ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+ srb_packet_count--;
+ }
- srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
return -EIO;
@@ -1086,57 +1141,38 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
/* Enable the DMA */
iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
- pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
return 0;
}
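/*
 * Note: the dma_set_mask_and_coherent() return value is ignored above;
 * if a 64-bit mask is rejected, the device silently keeps the default
 * 32-bit mask. A checked variant would read (sketch only):
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (ret)	/* fall back to 32-bit addressing */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return ret;
}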
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
- u32 sysid, srb_status, build;
- u8 sysid_nr_chan;
- int ret;
-
- ret = kvaser_pciefd_read_cfg(pcie);
- if (ret)
- return ret;
-
- sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
- sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
- if (pcie->nr_channels != sysid_nr_chan) {
- dev_err(&pcie->pci->dev,
- "Number of channels does not match: %u vs %u\n",
- pcie->nr_channels,
- sysid_nr_chan);
- return -ENODEV;
- }
+ u32 version, srb_status, build;
- if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
- pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
+ version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
+ build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
+ pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
+ FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));
+ pcie->fw_version.major = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version);
+ pcie->fw_version.minor = FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version);
+ pcie->fw_version.build = FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build);
- build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
- dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
- (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
- sysid & 0xff,
- (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
-
- srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
- dev_err(&pcie->pci->dev,
- "Hardware without DMA is not supported\n");
+ dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
return -ENODEV;
}
- pcie->bus_freq = ioread32(pcie->reg_base +
- KVASER_PCIEFD_SYSID_BUSFREQ_REG);
- pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
+ pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
+ pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
pcie->freq_to_ticks_div = pcie->freq / 1000000;
if (pcie->freq_to_ticks_div == 0)
pcie->freq_to_ticks_div = 1;
-
/* Turn off all loopback functionality */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
- return ret;
+ iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));
+
+ return 0;
}
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
@@ -1146,65 +1182,64 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
struct sk_buff *skb;
struct canfd_frame *cf;
struct can_priv *priv;
- struct net_device_stats *stats;
- struct skb_shared_hwtstamps *shhwtstamps;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
+ u8 dlc;
if (ch_id >= pcie->nr_channels)
return -EIO;
priv = &pcie->can[ch_id]->can;
- stats = &priv->dev->stats;
+ dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);
if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
- stats->rx_dropped++;
- return -ENOMEM;
+ priv->dev->stats.rx_dropped++;
+ return 0;
}
+ cf->len = can_fd_dlc2len(dlc);
if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
cf->flags |= CANFD_BRS;
-
if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
cf->flags |= CANFD_ESI;
} else {
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
- stats->rx_dropped++;
- return -ENOMEM;
+ priv->dev->stats.rx_dropped++;
+ return 0;
}
+ can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
- cf->can_id = p->header[0] & CAN_EFF_MASK;
+ cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
cf->can_id |= CAN_EFF_FLAG;
- cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
-
- if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, data, cf->len);
+ priv->dev->stats.rx_bytes += cf->len;
+ }
+ priv->dev->stats.rx_packets++;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- shhwtstamps = skb_hwtstamps(skb);
-
- shhwtstamps->hwtstamp =
- ns_to_ktime(div_u64(p->timestamp * 1000,
- pcie->freq_to_ticks_div));
-
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
+ netif_rx(skb);
- return netif_rx(skb);
+ return 0;
}
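/*
 * Note: in Classical CAN, DLC values 9..15 all mean 8 data bytes on the
 * wire. can_frame_set_cc_len() clamps cf->len to 8 and, when
 * CAN_CTRLMODE_CC_LEN8_DLC is active, keeps the raw DLC in cf->len8_dlc.
 * Rough sketch of that documented behaviour:
 */
static void example_set_cc_len(struct can_frame *cf, u8 dlc, u32 ctrlmode)
{
	cf->len = min_t(u8, dlc, CAN_MAX_DLEN);		/* at most 8 */
	if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && dlc > CAN_MAX_DLEN)
		cf->len8_dlc = dlc;			/* raw DLC 9..15 */
}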
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1214,13 +1249,24 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
spin_lock_irqsave(&can->lock, irq_flags);
netif_stop_queue(can->can.dev);
spin_unlock_irqrestore(&can->lock, irq_flags);
-
/* Prevent CAN controller from auto recover from bus off */
if (!can->can.restart_ms) {
kvaser_pciefd_start_controller_flush(can);
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
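/*
 * Note: CAN_ERR_CNT tells receivers that data[6]/data[7] of the error
 * frame carry the tx/rx error counters. A userspace SocketCAN consumer
 * could decode the frame like this (sketch, <linux/can/error.h> and
 * <stdio.h> assumed):
 */
static void example_decode_err_frame(const struct can_frame *cf)
{
	if ((cf->can_id & CAN_ERR_FLAG) && (cf->can_id & CAN_ERR_CNT))
		printf("txerr=%u rxerr=%u\n", cf->data[6], cf->data[7]);
}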
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1232,7 +1278,7 @@ static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
p->header[0] & KVASER_PCIEFD_SPACK_IRM)
*new_state = CAN_STATE_BUS_OFF;
- else if (bec->txerr >= 255 || bec->rxerr >= 255)
+ else if (bec->txerr >= 255 || bec->rxerr >= 255)
*new_state = CAN_STATE_BUS_OFF;
else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
*new_state = CAN_STATE_ERROR_PASSIVE;
@@ -1255,59 +1301,44 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
- struct skb_shared_hwtstamps *shhwtstamps;
- struct net_device_stats *stats = &ndev->stats;
old_state = can->can.state;
- bec.txerr = p->header[0] & 0xff;
- bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
-
- kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
- &rx_state);
-
- skb = alloc_can_err_skb(ndev, &cf);
+ bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
+ bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state,
- rx_state);
-
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
can->can.can_stats.bus_error++;
- stats->rx_errors++;
+ if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+ ndev->stats.tx_errors++;
+ else
+ ndev->stats.rx_errors++;
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- stats->rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(div_u64(p->timestamp * 1000,
- can->kv_pcie->freq_to_ticks_div));
- cf->can_id |= CAN_ERR_BUSERROR;
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
- netif_rx(skb);
return 0;
}
@@ -1315,19 +1346,19 @@ static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_rx_packet *p)
{
struct kvaser_pciefd_can *can;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels)
return -EIO;
can = pcie->can[ch_id];
-
kvaser_pciefd_rx_error_frame(can, p);
if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
/* Do not report more errors, until bec_poll_timer expires */
kvaser_pciefd_disable_err_gen(can);
/* Start polling the error counters */
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
+
return 0;
}
@@ -1336,48 +1367,29 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
- bec.txerr = p->header[0] & 0xff;
- bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
-
- kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
- &rx_state);
+ bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
+ bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
if (new_state != old_state) {
struct net_device *ndev = can->can.dev;
struct sk_buff *skb;
struct can_frame *cf;
- struct skb_shared_hwtstamps *shhwtstamps;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
- struct net_device_stats *stats = &ndev->stats;
-
- stats->rx_dropped++;
- return -ENOMEM;
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
+ ndev->stats.rx_dropped++;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state,
- rx_state);
-
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(div_u64(p->timestamp * 1000,
- can->kv_pcie->freq_to_ticks_div));
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1385,7 +1397,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
@@ -1394,7 +1406,7 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_can *can;
u8 cmdseq;
u32 status;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels)
return -EIO;
@@ -1402,46 +1414,40 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
can = pcie->can[ch_id];
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
- cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
+ cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);
/* Reset done, start abort and flush */
if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
- cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
- u32 cmd;
-
iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- cmd = KVASER_PCIEFD_KCAN_CMD_AT;
- cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
- iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
-
- iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ kvaser_pciefd_abort_flush_reset(can);
} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
- cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in FIFO */
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+ u8 count;
+ count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
+ ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
if (!count)
- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
+ KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
- cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
+ cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
/* Response to status request received */
kvaser_pciefd_handle_status_resp(can, p);
if (can->can.state != CAN_STATE_BUS_OFF &&
can->can.state != CAN_STATE_ERROR_ACTIVE) {
- mod_timer(&can->bec_poll_timer,
- KVASER_PCIEFD_BEC_POLL_FREQ);
+ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
}
} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
- !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
+ !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
/* Reset to bus on detected */
if (!completion_done(&can->start_comp))
complete(&can->start_comp);
@@ -1450,50 +1456,14 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
return 0;
}
-static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
- struct kvaser_pciefd_rx_packet *p)
-{
- struct kvaser_pciefd_can *can;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
-
- if (ch_id >= pcie->nr_channels)
- return -EIO;
-
- can = pcie->can[ch_id];
-
- /* If this is the last flushed packet, send end of flush */
- if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
- if (count == 0)
- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
- can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
- } else {
- int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- struct net_device_stats *stats = &can->can.dev->stats;
-
- stats->tx_bytes += dlc;
- stats->tx_packets++;
-
- if (netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
- }
-
- return 0;
-}
-
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
struct kvaser_pciefd_rx_packet *p)
{
struct sk_buff *skb;
- struct net_device_stats *stats = &can->can.dev->stats;
struct can_frame *cf;
skb = alloc_can_err_skb(can->can.dev, &cf);
-
- stats->tx_errors++;
+ can->can.dev->stats.tx_errors++;
if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
if (skb)
cf->can_id |= CAN_ERR_LOSTARB;
@@ -1504,11 +1474,10 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) {
cf->can_id |= CAN_ERR_BUSERROR;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
netif_rx(skb);
} else {
- stats->rx_dropped++;
+ can->can.dev->stats.rx_dropped++;
netdev_warn(can->can.dev, "No memory left for err_skb\n");
}
}
@@ -1518,7 +1487,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
{
struct kvaser_pciefd_can *can;
bool one_shot_fail = false;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels)
return -EIO;
@@ -1536,20 +1505,26 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
- int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+ int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
+ unsigned int len, frame_len = 0;
+ struct sk_buff *skb;
- if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
- netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
+ skb = can->can.echo_skb[echo_idx];
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (!one_shot_fail) {
- struct net_device_stats *stats = &can->can.dev->stats;
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
- stats->tx_bytes += dlc;
- stats->tx_packets++;
+ if (!one_shot_fail) {
+ can->can.dev->stats.tx_bytes += len;
+ can->can.dev->stats.tx_packets++;
}
}
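/*
 * Note: the smp_store_release() above pairs with the READ_ONCE() of
 * ack_idx in kvaser_pciefd_tx_avail(), so the xmit path observes the
 * freed echo slot no later than the advanced index. Generic shape of
 * the pairing (sketch):
 */
static void example_publish(unsigned int *ack_idx)
{
	/* completion side: prior slot writes ordered before the index */
	smp_store_release(ack_idx, *ack_idx + 1);
}

static unsigned int example_observe(const unsigned int *ack_idx)
{
	/* submission side: paired, tear-free read of the index */
	return READ_ONCE(*ack_idx);
}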
@@ -1560,7 +1535,7 @@ static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
struct kvaser_pciefd_rx_packet *p)
{
struct kvaser_pciefd_can *can;
- u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+ u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
if (ch_id >= pcie->nr_channels)
return -EIO;
@@ -1599,15 +1574,15 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
pos += 2;
p->timestamp = le64_to_cpu(timestamp);
- type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
+ type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
switch (type) {
case KVASER_PCIEFD_PACK_TYPE_DATA:
ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
u8 data_len;
- data_len = can_fd_dlc2len(p->header[1] >>
- KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+ data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
+ p->header[1]));
pos += DIV_ROUND_UP(data_len, 4);
}
break;
@@ -1624,16 +1599,13 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
ret = kvaser_pciefd_handle_error_packet(pcie, p);
break;
- case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
- ret = kvaser_pciefd_handle_eack_packet(pcie, p);
- break;
-
case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
break;
case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
+ case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
case KVASER_PCIEFD_PACK_TYPE_TXRQ:
dev_info(&pcie->pci->dev,
"Received unexpected packet type 0x%08X\n", type);
@@ -1651,7 +1623,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
	/* Position does not point to the end of the packet,
* corrupted packet size?
*/
- if ((*start_pos + size) != pos)
+ if (unlikely((*start_pos + size) != pos))
return -EIO;
/* Point to the next packet header, if any */
@@ -1664,59 +1636,60 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
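/*
 * Note: doing one netif_subqueue_completed_wake() per DMA buffer,
 * rather than one per ACK packet, feeds byte queue limits coherent
 * completion batches. The helper roughly combines (sketch):
 */
static void example_complete_tx(struct net_device *dev, unsigned int pkts,
				unsigned int bytes, unsigned int tx_avail)
{
	netdev_completed_queue(dev, pkts, bytes);	/* BQL accounting */
	if (tx_avail >= 1 && netif_queue_stopped(dev))
		netif_wake_queue(dev);			/* restart xmit */
}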
-static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
- u32 irq;
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
+ u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
}
if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
}
- if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
- return 0;
}
-static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
netdev_err(can->can.dev, "Tx FIFO overflow\n");
- if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
- u8 count = ioread32(can->reg_base +
- KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
- if (count == 0)
- iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
- can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
- }
-
if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
netdev_err(can->can.dev,
"Fail to change bittiming, when not in reset mode\n");
@@ -1728,133 +1701,158 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
netdev_err(can->can.dev, "Rx FIFO overflow\n");
iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
- return 0;
}
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
- u32 board_irq;
+ const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
int i;
- board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
-
- if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
+ if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & KVASER_PCIEFD_IRQ_SRB)
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
+ if (pci_irq & irq_mask->kcan_rx0)
kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
- if (!pcie->can[i]) {
- dev_err(&pcie->pci->dev,
- "IRQ mask points to unallocated controller\n");
- break;
- }
-
- /* Check that mask matches channel (i) IRQ mask */
- if (board_irq & (1 << i))
+ if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
return IRQ_HANDLED;
}
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
int i;
- struct kvaser_pciefd_can *can;
for (i = 0; i < pcie->nr_channels; i++) {
- can = pcie->can[i];
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
if (can) {
- iowrite32(0,
- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
free_candev(can->can.dev);
}
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+ /* Masking PCI_IRQ is insufficient as running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
+ int ret;
+ struct devlink *devlink;
+ struct device *dev = &pdev->dev;
struct kvaser_pciefd *pcie;
+ const struct kvaser_pciefd_irq_mask *irq_mask;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ devlink = devlink_alloc(&kvaser_pciefd_devlink_ops, sizeof(*pcie), dev);
+ if (!devlink)
return -ENOMEM;
+ pcie = devlink_priv(devlink);
pci_set_drvdata(pdev, pcie);
pcie->pci = pdev;
+ pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
+ irq_mask = pcie->driver_data->irq_mask;
- err = pci_enable_device(pdev);
- if (err)
- return err;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_devlink;
- err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
- if (err)
+ ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (ret)
goto err_disable_pci;
pcie->reg_base = pci_iomap(pdev, 0, 0);
if (!pcie->reg_base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_regions;
}
- err = kvaser_pciefd_setup_board(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_board(pcie);
+ if (ret)
goto err_pci_iounmap;
- err = kvaser_pciefd_setup_dma(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_dma(pcie);
+ if (ret)
goto err_pci_iounmap;
pci_set_master(pdev);
- err = kvaser_pciefd_setup_can_ctrls(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (ret)
goto err_teardown_can_ctrls;
+ ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocate IRQ vectors.\n");
+ goto err_teardown_can_ctrls;
+ }
+
+ ret = pci_irq_vector(pcie->pci, 0);
+ if (ret < 0)
+ goto err_pci_free_irq_vectors;
+
+ pcie->pci->irq = ret;
+
+ ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ goto err_pci_free_irq_vectors;
+ }
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
- pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
- pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
-
- /* Reset IRQ handling, expected to be off before */
- iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
- pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
- iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
- pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ /* Enable PCI interrupts */
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
- goto err_teardown_can_ctrls;
-
- err = kvaser_pciefd_reg_candev(pcie);
- if (err)
+ ret = kvaser_pciefd_reg_candev(pcie);
+ if (ret)
goto err_free_irq;
+ devlink_register(devlink);
+
return 0;
err_free_irq:
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
+err_pci_free_irq_vectors:
+ pci_free_irq_vectors(pcie->pci);
+
err_teardown_can_ctrls:
kvaser_pciefd_teardown_can_ctrls(pcie);
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
pci_clear_master(pdev);
err_pci_iounmap:
@@ -1866,45 +1864,38 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return err;
-}
-
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- struct kvaser_pciefd_can *can;
- int i;
+err_free_devlink:
+ devlink_free(devlink);
- for (i = 0; i < pcie->nr_channels; i++) {
- can = pcie->can[i];
- if (can) {
- iowrite32(0,
- can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- del_timer(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
+ return ret;
}
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Turn off IRQ generation */
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
- pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
- iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+ unregister_candev(can->can.dev);
+ timer_delete(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ kvaser_pciefd_devlink_port_unregister(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
+ pci_free_irq_vectors(pcie->pci);
- pci_clear_master(pdev);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+
+ devlink_unregister(priv_to_devlink(pcie));
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ devlink_free(priv_to_devlink(pcie));
}
static struct pci_driver kvaser_pciefd = {
diff --git a/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
new file mode 100644
index 000000000000..1d61a8b0eeba
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* kvaser_pciefd devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_pciefd.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+static int kvaser_pciefd_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_pciefd *pcie = devlink_priv(devlink);
+ char buf[] = "xxx.xxx.xxxxx";
+ int ret;
+
+ if (pcie->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ pcie->fw_version.major,
+ pcie->fw_version.minor,
+ pcie->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct devlink_ops kvaser_pciefd_devlink_ops = {
+ .info_get = kvaser_pciefd_devlink_info_get,
+};
+
+int kvaser_pciefd_devlink_port_register(struct kvaser_pciefd_can *can)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = can->can.dev->dev_port,
+ };
+ devlink_port_attrs_set(&can->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(can->kv_pcie),
+ &can->devlink_port, can->can.dev->dev_port);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(can->can.dev, &can->devlink_port);
+
+ return 0;
+}
+
+void kvaser_pciefd_devlink_port_unregister(struct kvaser_pciefd_can *can)
+{
+ devlink_port_unregister(&can->devlink_port);
+}
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
deleted file mode 100644
index db14897f8e16..000000000000
--- a/drivers/net/can/led.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/can/dev.h>
-
-#include <linux/can/led.h>
-
-static unsigned long led_delay = 50;
-module_param(led_delay, ulong, 0644);
-MODULE_PARM_DESC(led_delay,
- "blink delay time for activity leds (msecs, default: 50).");
-
-/* Trigger a LED event in response to a CAN device event */
-void can_led_event(struct net_device *netdev, enum can_led_event event)
-{
- struct can_priv *priv = netdev_priv(netdev);
-
- switch (event) {
- case CAN_LED_EVENT_OPEN:
- led_trigger_event(priv->tx_led_trig, LED_FULL);
- led_trigger_event(priv->rx_led_trig, LED_FULL);
- led_trigger_event(priv->rxtx_led_trig, LED_FULL);
- break;
- case CAN_LED_EVENT_STOP:
- led_trigger_event(priv->tx_led_trig, LED_OFF);
- led_trigger_event(priv->rx_led_trig, LED_OFF);
- led_trigger_event(priv->rxtx_led_trig, LED_OFF);
- break;
- case CAN_LED_EVENT_TX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->tx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- case CAN_LED_EVENT_RX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->rx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- }
-}
-EXPORT_SYMBOL_GPL(can_led_event);
-
-static void can_led_release(struct device *gendev, void *res)
-{
- struct can_priv *priv = netdev_priv(to_net_dev(gendev));
-
- led_trigger_unregister_simple(priv->tx_led_trig);
- led_trigger_unregister_simple(priv->rx_led_trig);
- led_trigger_unregister_simple(priv->rxtx_led_trig);
-}
-
-/* Register CAN LED triggers for a CAN device
- *
- * This is normally called from a driver's probe function
- */
-void devm_can_led_init(struct net_device *netdev)
-{
- struct can_priv *priv = netdev_priv(netdev);
- void *res;
-
- res = devres_alloc(can_led_release, 0, GFP_KERNEL);
- if (!res) {
- netdev_err(netdev, "cannot register LED triggers\n");
- return;
- }
-
- snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
- "%s-tx", netdev->name);
- snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
- "%s-rx", netdev->name);
- snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
- "%s-rxtx", netdev->name);
-
- led_trigger_register_simple(priv->tx_led_trig_name,
- &priv->tx_led_trig);
- led_trigger_register_simple(priv->rx_led_trig_name,
- &priv->rx_led_trig);
- led_trigger_register_simple(priv->rxtx_led_trig_name,
- &priv->rxtx_led_trig);
-
- devres_add(&netdev->dev, res);
-}
-EXPORT_SYMBOL_GPL(devm_can_led_init);
-
-/* NETDEV rename notifier to rename the associated led triggers too */
-static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct can_priv *priv = safe_candev_priv(netdev);
- char name[CAN_LED_NAME_SZ];
-
- if (!priv)
- return NOTIFY_DONE;
-
- if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
- return NOTIFY_DONE;
-
- if (msg == NETDEV_CHANGENAME) {
- snprintf(name, sizeof(name), "%s-tx", netdev->name);
- led_trigger_rename_static(name, priv->tx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rx", netdev->name);
- led_trigger_rename_static(name, priv->rx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
- led_trigger_rename_static(name, priv->rxtx_led_trig);
- }
-
- return NOTIFY_DONE;
-}
-
-/* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
- .notifier_call = can_led_notifier,
-};
-
-int __init can_led_notifier_init(void)
-{
- return register_netdevice_notifier(&can_netdev_notifier);
-}
-
-void __exit can_led_notifier_exit(void)
-{
- unregister_netdevice_notifier(&can_netdev_notifier);
-}
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 45ad1b3f0cd0..fc2afab36279 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2470c47b2e31..eb856547ae7d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
@@ -9,19 +9,21 @@
*/
#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
-#include <linux/can/dev.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/phy/phy.h>
+#include <linux/reset.h>
#include "m_can.h"
@@ -77,9 +79,6 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
/* message ram configuration data length */
#define MRAM_CFG_LEN 8
@@ -158,6 +157,7 @@ enum m_can_reg {
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
#define PSR_LEC_MASK GENMASK(2, 0)
+#define PSR_DLEC_MASK GENMASK(10, 8)
/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
@@ -204,16 +204,16 @@ enum m_can_reg {
/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
@@ -256,6 +256,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -309,6 +310,9 @@ enum m_can_reg {
#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
+/* Hrtimer polling interval */
+#define HRTIMER_POLL_INTERVAL_MS 1
+
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
* and we can save a (potentially slow) bus round trip by combining
* reads and writes to them.
@@ -318,6 +322,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
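Since the ID and DLC words sit directly in front of the payload in Message RAM, assembling the whole element in a RAM image first allows a single burst transfer. A compilable sketch of such an image builder (sizes and names are illustrative, not the driver's API):

#include <stdint.h>
#include <string.h>

struct fifo_image {
	uint32_t id;
	uint32_t dlc;
	uint8_t data[64];	/* CANFD_MAX_DLEN in the driver */
};

/* Copy the payload and zero-pad the tail, so the element can be pushed
 * to the FIFO with one contiguous write instead of two transfers.
 */
static void build_fifo_image(struct fifo_image *e, uint32_t id, uint32_t dlc,
			     const uint8_t *payload, size_t len)
{
	if (len > sizeof(e->data))
		len = sizeof(e->data);
	e->id = id;
	e->dlc = dlc;
	memset(e->data, 0, sizeof(e->data));
	memcpy(e->data, payload, len);
}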
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -336,6 +346,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +359,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
@@ -364,54 +380,115 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
+static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
{
- return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
+ u32 val_before = m_can_read(cdev, M_CAN_CCCR);
+ u32 val_after = (val_before & ~mask) | val;
+ size_t tries = 10;
+
+ if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
+ netdev_err(cdev->net,
+ "refusing to configure device when in normal mode\n");
+ return -EBUSY;
+ }
+
+ /* The chip should be in standby mode when changing the CCCR register,
+ * and some chips set the CSR and CSA bits when in standby. Furthermore,
+ * the CSR and CSA bits should be written as zeros, even when they read
+ * ones.
+ */
+ val_after &= ~(CCCR_CSR | CCCR_CSA);
+
+ while (tries--) {
+ u32 val_read;
+
+ /* Write the desired value in each try, as setting some bits in
+ * the CCCR register requires other bits to be set first. E.g.
+ * setting the NISO bit requires setting the CCE bit first.
+ */
+ m_can_write(cdev, M_CAN_CCCR, val_after);
+
+ val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
+
+ if (val_read == val_after)
+ return 0;
+
+ usleep_range(1, 5);
+ }
+
+ return -ETIMEDOUT;
}
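The loop above rewrites the complete target value on every attempt because some CCCR bits only latch after their prerequisites (e.g. CCE before NISO) are set. A minimal user-space model of this bounded read-modify-write, with the register access stubbed and all names illustrative:

#include <stdint.h>
#include <errno.h>

static uint32_t fake_reg;	/* stands in for a memory-mapped CCCR */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* Rewrite the complete desired value on each try and poll the read-back;
 * give up after a fixed number of attempts.
 */
static int update_bits(uint32_t mask, uint32_t val, int tries)
{
	uint32_t want = (reg_read() & ~mask) | val;

	while (tries--) {
		reg_write(want);
		if (reg_read() == want)
			return 0;
	}
	return -ETIMEDOUT;
}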
-static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
+static int m_can_config_enable(struct m_can_classdev *cdev)
{
- u32 cccr = m_can_read(cdev, M_CAN_CCCR);
- u32 timeout = 10;
- u32 val = 0;
+ int err;
- /* Clear the Clock stop request if it was set */
- if (cccr & CCCR_CSR)
- cccr &= ~CCCR_CSR;
+ /* CCCR_INIT must be set in order to set CCCR_CCE, but access to
+ * configuration registers should only be enabled when in standby mode,
+ * where CCCR_INIT is always set.
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
+ if (err)
+ netdev_err(cdev->net, "failed to enable configuration mode\n");
- if (enable) {
- /* enable m_can configuration */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
- udelay(5);
- /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
- } else {
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
- }
+ return err;
+}
- /* there's a delay for module initialization */
- if (enable)
- val = CCCR_INIT | CCCR_CCE;
+static int m_can_config_disable(struct m_can_classdev *cdev)
+{
+ int err;
- while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
- if (timeout == 0) {
- netdev_warn(cdev->net, "Failed to init module\n");
- return;
- }
- timeout--;
- udelay(1);
- }
+ /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
+ * standby mode
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
+ if (err)
+ netdev_err(cdev->net, "failed to disable configuration registers\n");
+
+ return err;
+}
+
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ m_can_write(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
+
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
}
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ netdev_dbg(cdev->net, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+
+ if (!cdev->net->irq) {
+ netdev_dbg(cdev->net, "Stop hrtimer\n");
+ hrtimer_try_to_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -431,18 +508,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -458,8 +543,8 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
- timestamp);
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
+ timestamp);
if (err)
stats->rx_fifo_errors++;
} else {
@@ -467,19 +552,16 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
}
}
-static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
+static int m_can_read_fifo(struct net_device *dev, u32 fgi)
{
struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
struct id_and_dlc fifo_header;
- u32 fgi;
u32 timestamp = 0;
int err;
- /* calculate the fifo get index for where to read data */
- fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
if (err)
goto out_fail;
@@ -517,21 +599,20 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
- goto out_fail;
- }
-
- /* acknowledge rx fifo 0 */
- m_can_write(cdev, M_CAN_RXF0A, fgi);
+ goto out_free_skb;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
return 0;
+out_free_skb:
+ kfree_skb(skb);
out_fail:
netdev_err(dev, "FIFO read returned %d\n", err);
return err;
@@ -542,7 +623,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
- int err;
+ u32 rx_count;
+ u32 fgi;
+ int ack_fgi = -1;
+ int i;
+ int err = 0;
rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
@@ -550,18 +635,25 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
return 0;
}
- while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
- err = m_can_read_fifo(dev, rxfs);
+ rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
+ fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
+
+ for (i = 0; i < rx_count && quota > 0; ++i) {
+ err = m_can_read_fifo(dev, fgi);
if (err)
- return err;
+ break;
quota--;
pkts++;
- rxfs = m_can_read(cdev, M_CAN_RXF0S);
+ ack_fgi = fgi;
+ fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
+ if (ack_fgi != -1)
+ m_can_write(cdev, M_CAN_RXF0A, ack_fgi);
+
+ if (err)
+ return err;
return pkts;
}
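Reading the fill level and get index once per poll, and acknowledging only the last consumed index, trades one register access per frame for one per NAPI run. A small stand-alone model of the wrap-and-ack bookkeeping (ring size and names invented for illustration):

#include <stdio.h>

#define RING_SIZE 8	/* stands in for cdev->mcfg[MRAM_RXF0].num */

int main(void)
{
	unsigned int fgi = 6;	/* get index reported by hardware */
	unsigned int count = 4;	/* fill level reported by hardware */
	int ack_fgi = -1;

	for (unsigned int i = 0; i < count; i++) {
		printf("consume element %u\n", fgi);
		ack_fgi = fgi;
		fgi = (fgi + 1 >= RING_SIZE) ? 0 : fgi + 1;
	}
	if (ack_fgi != -1)
		printf("ack up to index %d in one register write\n", ack_fgi);
	return 0;
}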
@@ -574,7 +666,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
struct can_frame *frame;
u32 timestamp = 0;
- netdev_err(dev, "msg lost in rxf0\n");
+ netdev_dbg(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;
@@ -604,49 +696,59 @@ static int m_can_handle_lec_err(struct net_device *dev,
u32 timestamp = 0;
cdev->can.can_stats.bus_error++;
- stats->rx_errors++;
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
- if (unlikely(!skb))
- return 0;
/* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (likely(skb))
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (lec_type) {
case LEC_STUFF_ERROR:
netdev_dbg(dev, "stuff error\n");
- cf->data[2] |= CAN_ERR_PROT_STUFF;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
case LEC_FORM_ERROR:
netdev_dbg(dev, "form error\n");
- cf->data[2] |= CAN_ERR_PROT_FORM;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_FORM;
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
break;
case LEC_BIT1_ERROR:
netdev_dbg(dev, "bit1 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT1;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
break;
case LEC_BIT0_ERROR:
netdev_dbg(dev, "bit0 error\n");
- cf->data[2] |= CAN_ERR_PROT_BIT0;
+ stats->tx_errors++;
+ if (likely(skb))
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ stats->rx_errors++;
+ if (likely(skb))
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
break;
default:
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (unlikely(!skb))
+ return 0;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -689,6 +791,10 @@ static int m_can_get_berr_counter(const struct net_device *dev,
struct m_can_classdev *cdev = netdev_priv(dev);
int err;
+ /* Avoid waking up the controller if the interface is down */
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
err = m_can_clk_start(cdev);
if (err)
return err;
@@ -704,7 +810,6 @@ static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -712,6 +817,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -741,9 +849,15 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -752,7 +866,7 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -769,9 +883,6 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -780,38 +891,39 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
- if (irqstatus & IR_ELO)
- netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)
@@ -822,11 +934,9 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
netdev_err(dev, "Message RAM access failure occurred\n");
}
-static inline bool is_lec_err(u32 psr)
+static inline bool is_lec_err(u8 lec)
{
- psr &= LEC_UNUSED;
-
- return psr && (psr != LEC_UNUSED);
+ return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}
static inline bool m_can_is_protocol_err(u32 irqstatus)
@@ -881,9 +991,20 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
- if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
- is_lec_err(psr))
- work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
+ u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);
+
+ if (is_lec_err(lec)) {
+ netdev_dbg(dev, "Arbitration phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, lec);
+ }
+
+ if (is_lec_err(dlec)) {
+ netdev_dbg(dev, "Data phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, dlec);
+ }
+ }
/* handle protocol errors in arbitration phase */
if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
@@ -896,14 +1017,12 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
return work_done;
}
-static int m_can_rx_handler(struct net_device *dev, int quota)
+static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int rx_work_or_err;
int work_done = 0;
- u32 irqstatus, psr;
- irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
if (!irqstatus)
goto end;
@@ -928,13 +1047,12 @@ static int m_can_rx_handler(struct net_device *dev, int quota)
}
}
- psr = m_can_read(cdev, M_CAN_PSR);
-
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev, psr);
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
- work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
+ work_done += m_can_handle_bus_errors(dev, irqstatus,
+ m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_RF0N) {
rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
@@ -947,29 +1065,16 @@ end:
return work_done;
}
-static int m_can_rx_peripheral(struct net_device *dev)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done;
-
- work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
-
- /* Don't re-enable interrupts if the driver had a fatal error
- * (e.g., FIFO read failure).
- */
- if (work_done >= 0)
- m_can_enable_all_interrupts(cdev);
-
- return work_done;
-}
-
static int m_can_poll(struct napi_struct *napi, int quota)
{
struct net_device *dev = napi->dev;
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
+ u32 irqstatus;
+
+ irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
- work_done = m_can_rx_handler(dev, quota);
+ work_done = m_can_rx_handler(dev, quota, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
@@ -986,23 +1091,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&cdev->offload,
- msg_mark,
- timestamp,
- NULL);
+ can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
+ msg_mark,
+ timestamp,
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
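The two helpers above form an admit/complete pair around a single in-flight counter. A simplified model of that accounting (single-threaded sketch; the driver does this under a spinlock):

#include <stdbool.h>

struct txq {
	int in_flight;
	int fifo_size;
	bool stopped;
};

/* Admit one frame: stop the queue when the high-water mark is reached,
 * and reject outright if admitting would exceed the FIFO depth.
 */
static bool txq_admit(struct txq *q)
{
	int next = q->in_flight + 1;

	if (next >= q->fifo_size)
		q->stopped = true;
	if (next > q->fifo_size)
		return false;		/* caller returns NETDEV_TX_BUSY */
	q->in_flight = next;
	return true;
}

/* Complete n frames: wake the queue only if it was stopped at the mark. */
static void txq_complete(struct txq *q, int n)
{
	if (q->in_flight >= q->fifo_size && n > 0)
		q->stopped = false;
	q->in_flight -= n;
}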
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1010,8 +1152,12 @@ static int m_can_echo_tx_event(struct net_device *dev)
u32 txe_count = 0;
u32 m_can_txefs;
u32 fgi = 0;
+ int ack_fgi = -1;
int i = 0;
+ int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1020,52 +1166,102 @@ static int m_can_echo_tx_event(struct net_device *dev)
/* Get Tx Event fifo element count */
txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
+ fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
- int err;
-
- /* retrieve get index */
- fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */
err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
if (err) {
netdev_err(dev, "TXE FIFO read returned %d\n", err);
- return err;
+ break;
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
- timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
- /* ack txe element */
- m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
- fgi));
+ ack_fgi = fgi;
+ fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
- return 0;
+ if (ack_fgi != -1)
+ m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
+ ack_fgi));
+
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
+ return err;
}
-static irqreturn_t m_can_isr(int irq, void *dev_id)
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
{
- struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_classdev *cdev = netdev_priv(dev);
- u32 ir;
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer || enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
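The idea: while a coalescing window is open, the per-frame interrupt source is masked and one shared timer stands in for it; once no window is active the sources are unmasked again. A rough user-space model (names and flags invented):

#include <stdbool.h>
#include <stdint.h>

#define IRQ_RX 0x1
#define IRQ_TX 0x2

struct coal_state {
	uint32_t enabled;	/* currently unmasked interrupt sources */
	bool timer_active;
};

/* On an event burst, mask the noisy source and let a shared timer
 * re-open it later; with coalescing off (or the timer idle) the source
 * stays unmasked.
 */
static void coal_update(struct coal_state *s, uint32_t ev,
			bool rx_coal, bool tx_coal)
{
	bool arm_rx = rx_coal && (ev & IRQ_RX);
	bool arm_tx = tx_coal && (ev & IRQ_TX);

	if (arm_rx)
		s->enabled &= ~IRQ_RX;
	else if (!s->timer_active)
		s->enabled |= IRQ_RX;

	if (arm_tx)
		s->enabled &= ~IRQ_TX;
	else if (!s->timer_active)
		s->enabled |= IRQ_TX;

	if (arm_rx || arm_tx)
		s->timer_active = true;	/* hrtimer_start() in the driver */
}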
+
+/* This interrupt handler is called either from the interrupt thread or a
+ * hrtimer. One implication is that a pending timer cannot be cancelled
+ * in a blocking fashion from within the handler.
+ */
+static int m_can_interrupt_handler(struct m_can_classdev *cdev)
+{
+ struct net_device *dev = cdev->net;
+ u32 ir = 0, ir_read;
+ int ret;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
- ir = m_can_read(cdev, M_CAN_IR);
- if (!ir)
- return IRQ_NONE;
- /* ACK all irqs */
- if (ir & IR_ALL_INT)
+ /* The m_can controller signals its interrupt status as a level, but
+ * depending on the integration, the CPU may interpret the signal as
+ * edge-triggered (for example with m_can_pci). For these
+ * edge-triggered integrations, we must observe that IR is 0 at least
+ * once to be sure that the next interrupt will generate an edge.
+ */
+ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
+ ir |= ir_read;
+
+ /* ACK all irqs */
m_can_write(cdev, M_CAN_IR, ir);
+ if (!cdev->irq_edge_triggered)
+ break;
+ }
+
+ m_can_coalescing_update(cdev, ir);
+ if (!ir)
+ return IRQ_NONE;
+
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1074,37 +1270,35 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
- m_can_disable_all_interrupts(cdev);
- if (!cdev->is_peripheral)
+ if (!cdev->is_peripheral) {
+ m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- else if (m_can_rx_peripheral(dev) < 0)
- goto out_fail;
+ } else {
+ ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
+ if (ret < 0)
+ return ret;
+ }
}
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
-
- can_led_event(dev, CAN_LED_EVENT_TX);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
- if (m_can_echo_tx_event(dev) != 0)
- goto out_fail;
-
- can_led_event(dev, CAN_LED_EVENT_TX);
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
+ ret = m_can_echo_tx_event(dev);
+ if (ret != 0)
+ return ret;
}
}
@@ -1112,10 +1306,34 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
can_rx_offload_threaded_irq_finish(&cdev->offload);
return IRQ_HANDLED;
+}
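One way to picture the IR draining described in the comment above is the following stand-alone model, where the "register" clears acknowledged bits on write (all names illustrative):

#include <stdint.h>

/* Hypothetical status source that clears acked bits on write. */
static uint32_t pending = 0x5;

static uint32_t ir_read(void) { return pending; }
static void ir_ack(uint32_t bits) { pending &= ~bits; }

/* For edge-triggered wiring, keep draining until the level reads 0 so
 * the next event produces a fresh edge; level-triggered wiring can stop
 * after one pass.
 */
static uint32_t drain_ir(int edge_triggered)
{
	uint32_t ir = 0, now;

	while ((now = ir_read()) != 0) {
		ir |= now;
		ir_ack(now);
		if (!edge_triggered)
			break;
	}
	return ir;
}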
-out_fail:
- m_can_disable_all_interrupts(cdev);
- return IRQ_HANDLED;
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
+
+ ret = m_can_interrupt_handler(cdev);
+ if (ret < 0) {
+ m_can_disable_all_interrupts(cdev);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
}
static const struct can_bittiming_const m_can_bittiming_const_30X = {
@@ -1166,11 +1384,32 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
.brp_inc = 1,
};
+static int m_can_init_ram(struct m_can_classdev *cdev)
+{
+ int end, i, start;
+ int err = 0;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+
+ for (i = start; i < end; i += 4) {
+ err = m_can_fifo_write_no_off(cdev, i, 0x0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
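The initialization is a plain word-wise zeroing of the configured address range. A generic sketch of the same loop, assuming a caller-supplied 32-bit write callback:

#include <stdint.h>

/* Zero a [start, end) byte range one 32-bit word at a time, as done for
 * the Message RAM to avoid ECC/parity errors on the first read of an
 * uninitialized buffer.
 */
static int zero_words(int (*write32)(uint32_t addr, uint32_t val),
		      uint32_t start, uint32_t end)
{
	for (uint32_t a = start; a < end; a += 4) {
		int err = write32(a, 0);

		if (err)
			return err;
	}
	return 0;
}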
+
static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1242,12 +1481,27 @@ static int m_can_set_bittiming(struct net_device *dev)
* - setup bittiming
* - configure timestamp generation
*/
-static void m_can_chip_config(struct net_device *dev)
+static int m_can_chip_config(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ u32 interrupts = IR_ALL_INT;
u32 cccr, test;
+ int err;
- m_can_config_endisable(cdev, true);
+ err = m_can_init_ram(cdev);
+ if (err) {
+ netdev_err(dev, "Message RAM configuration failed\n");
+ return err;
+ }
+
+ /* Disable unused interrupts */
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
+ IR_TSW);
+
+ err = m_can_config_enable(cdev);
+ if (err)
+ return err;
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
@@ -1282,6 +1536,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1289,6 +1545,7 @@ static void m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1341,16 +1598,14 @@ static void m_can_chip_config(struct net_device *dev)
m_can_write(cdev, M_CAN_TEST, test);
/* Enable interrupts */
- m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
- if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
if (cdev->version == 30)
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
- ~(IR_ERR_LEC_30X));
+ interrupts &= ~(IR_ERR_LEC_30X);
else
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
- ~(IR_ERR_LEC_31X));
- else
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT);
+ interrupts &= ~(IR_ERR_LEC_31X);
+ }
+ cdev->active_interrupts = 0;
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1358,27 +1613,49 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
- /* enable internal timestamp generation, with a prescalar of 16. The
- * prescalar is applied to the nominal bit timing
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing
*/
- m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+ m_can_write(cdev, M_CAN_TSCC,
+ FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+ FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
- m_can_config_endisable(cdev, false);
+ err = m_can_config_disable(cdev);
+ if (err)
+ return err;
if (cdev->ops->init)
cdev->ops->init(cdev);
+
+ return 0;
}
-static void m_can_start(struct net_device *dev)
+static int m_can_start(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* basic m_can configuration */
- m_can_chip_config(dev);
+ ret = m_can_chip_config(dev);
+ if (ret)
+ return ret;
+
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
+
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
+
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
+ if (ret)
+ netdev_err(dev, "failed to enter normal mode\n");
+
+ return ret;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1427,55 +1704,56 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
}
/* Selectable Non ISO support only in version 3.2.x
- * This function checks if the bit is writable.
+ * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/
-static bool m_can_niso_supported(struct m_can_classdev *cdev)
+static int m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll = 0;
- int niso_timeout = -ETIMEDOUT;
- int i;
+ int ret, niso;
- m_can_config_endisable(cdev, true);
- cccr_reg = m_can_read(cdev, M_CAN_CCCR);
- cccr_reg |= CCCR_NISO;
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
+ ret = m_can_config_enable(cdev);
+ if (ret)
+ return ret;
- for (i = 0; i <= 10; i++) {
- cccr_poll = m_can_read(cdev, M_CAN_CCCR);
- if (cccr_poll == cccr_reg) {
- niso_timeout = 0;
- break;
- }
+ /* First try to set the NISO bit. */
+ niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
- usleep_range(1, 5);
+ /* Then clear it again. */
+ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
+ if (ret) {
+ netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n");
+ return ret;
}
- /* Clear NISO */
- cccr_reg &= ~(CCCR_NISO);
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
-
- m_can_config_endisable(cdev, false);
+ ret = m_can_config_disable(cdev);
+ if (ret)
+ return ret;
- /* return false if time out (-ETIMEDOUT), else return true */
- return !niso_timeout;
+ return niso == 0;
}
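The probe pattern is: try to set the bit, always revert it, and report whether the set stuck. A minimal self-contained model (the fake register makes every set succeed, so this only illustrates the control flow):

#include <stdint.h>

static uint32_t reg;

static int reg_update(uint32_t mask, uint32_t val)
{
	reg = (reg & ~mask) | val;
	return ((reg & mask) == val) ? 0 : -1;	/* read-back check */
}

/* A bit is "supported" iff it can be set and read back; the revert must
 * succeed, or the device would be left misconfigured.
 */
static int bit_is_writable(uint32_t bit)
{
	int set_ok = reg_update(bit, bit);	/* may legitimately fail */

	if (reg_update(bit, 0))			/* must not fail */
		return -1;
	return set_ok == 0;
}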
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version;
+ int m_can_version, err, niso;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(cdev->dev, "Unsupported version number: %2d",
- m_can_version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ m_can_version);
return -EINVAL;
}
+ /* Write the INIT bit, in case no hardware reset has happened before
+ * the probe (for example, it was observed that the Intel Elkhart Lake
+ * SoCs do not properly reset the CAN controllers on reboot)
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (err)
+ return err;
+
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, M_CAN_NAPI_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
@@ -1493,50 +1771,64 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
switch (cdev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
- cdev->can.ctrlmode_supported |=
- (m_can_niso_supported(cdev) ?
- CAN_CTRLMODE_FD_NON_ISO : 0);
+ niso = m_can_niso_supported(cdev);
+ if (niso < 0)
+ return niso;
+ if (niso)
+ cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
- dev_err(cdev->dev, "Unsupported version number: %2d",
- cdev->version);
+ netdev_err(cdev->net, "Unsupported version number: %2d",
+ cdev->version);
return -EINVAL;
}
- if (cdev->ops->init)
- cdev->ops->init(cdev);
-
return 0;
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
- m_can_config_endisable(cdev, true);
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (ret)
+ netdev_err(dev, "failed to enter standby mode: %pe\n",
+ ERR_PTR(ret));
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -1545,85 +1837,68 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral)
- napi_disable(&cdev->napi);
-
m_can_stop(dev);
- m_can_clk_stop(cdev);
- free_irq(dev->irq, dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+
+ m_can_clean(dev);
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
- }
-
- if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ } else {
+ napi_disable(&cdev->napi);
+ }
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
+ reset_control_assert(cdev->rst);
+ m_can_clk_stop(cdev);
phy_power_off(cdev->transceiver);
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
-{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
- can_put_echo_skb(skb, dev, 0, 0);
-
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~CCCR_CMR_MASK;
@@ -1640,30 +1915,16 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
m_can_write(cdev, M_CAN_CCCR, cccr);
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+
+ can_put_echo_skb(skb, dev, 0, frame_len);
+
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- /* Check if FIFO full */
- if (m_can_tx_fifo_full(cdev)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1678,30 +1939,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1712,46 +1975,107 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
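Each TX FIFO element gets its own work item, dispatched round-robin so submissions stay in FIFO order. A sketch of the slot ring (types are placeholders, and the driver's queue_work() call is elided):

struct tx_slot {
	void *skb;
	int submit;
};

/* Fill the next slot and advance the index, wrapping like the hardware
 * put index; returns the new "next" slot.
 */
static unsigned int queue_slot(struct tx_slot *slots, unsigned int n,
			       unsigned int next, void *skb, int submit)
{
	slots[next].skb = skb;
	slots[next].submit = submit;
	/* queue_work(wq, &slots[next].work) would go here in the driver */
	return (next + 1 >= n) ? 0 : next + 1;
}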
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
+
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
- m_can_tx_handler(cdev);
+ return NETDEV_TX_OK;
}
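Batching works by counting frames and flushing either at the batch limit or when the stack signals the end of a burst. A compilable model where a bool stands in for netdev_xmit_more():

#include <stdbool.h>
#include <stdio.h>

static unsigned int pending_frames;

/* Flush (ring the doorbell) when the batch is full or no more frames
 * follow; otherwise keep accumulating.
 */
static void enqueue_frame(unsigned int batch_limit, bool more)
{
	bool submit = (++pending_frames >= batch_limit) || !more;

	if (submit) {
		printf("ring doorbell for %u frame(s)\n", pending_frames);
		pending_frames = 0;
	}
}

int main(void)
{
	enqueue_frame(4, true);
	enqueue_frame(4, true);
	enqueue_frame(4, false);	/* end of burst: flush 3 at once */
	return 0;
}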
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
+}
+
+static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev =
+ container_of(timer, struct m_can_classdev, hrtimer);
+ int ret;
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF ||
+ cdev->can.state == CAN_STATE_STOPPED)
+ return HRTIMER_NORESTART;
+
+ ret = m_can_interrupt_handler(cdev);
+
+ /* On error or if napi is scheduled to read, stop the timer */
+ if (ret < 0 || napi_is_scheduled(&cdev->napi))
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
+
+ return HRTIMER_RESTART;
}
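This is the usual self-rearming poll protocol: do one unit of work, then rearm unless a terminal state or a fatal error was hit. Modelled in plain C (enum names invented):

enum restart { NORESTART, RESTART };

/* One poll tick: bail out permanently on a terminal condition, otherwise
 * ask to be re-armed after the poll interval.
 */
static enum restart poll_cb(int bus_off, int fatal_err)
{
	if (bus_off)
		return NORESTART;
	/* ... handle pending interrupts here ... */
	if (fatal_err)
		return NORESTART;
	return RESTART;	/* driver forwards the hrtimer by the interval */
}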
static int m_can_open(struct net_device *dev)
@@ -1767,32 +2091,40 @@ static int m_can_open(struct net_device *dev)
if (err)
goto out_phy_power_off;
+ err = reset_control_deassert(cdev->rst);
+ if (err)
+ goto exit_disable_clks;
+
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
+ goto out_reset_control_assert;
}
if (cdev->is_peripheral)
can_rx_offload_enable(&cdev->offload);
+ else
+ napi_enable(&cdev->napi);
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
dev->name, dev);
- } else {
+ } else if (dev->irq) {
err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
dev);
}
@@ -1803,24 +2135,28 @@ static int m_can_open(struct net_device *dev)
}
/* start the m_can controller */
- m_can_start(dev);
-
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
- if (!cdev->is_peripheral)
- napi_enable(&cdev->napi);
+ err = m_can_start(dev);
+ if (err)
+ goto exit_start_fail;
netif_start_queue(dev);
return 0;
+exit_start_fail:
+ if (cdev->is_peripheral || dev->irq)
+ free_irq(dev->irq, dev);
exit_irq_fail:
if (cdev->is_peripheral)
destroy_workqueue(cdev->tx_wq);
out_wq_fail:
if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ else
+ napi_disable(&cdev->napi);
close_candev(dev);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
exit_disable_clks:
m_can_clk_stop(cdev);
out_phy_power_off:
@@ -1832,17 +2168,194 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
.ndo_start_xmit = m_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
};
-static int register_m_can_dev(struct net_device *dev)
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
+ else
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
+
+ return 0;
+}
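Several of the checks above enforce one invariant: a frame threshold and its time threshold are either both zero or both non-zero. As a one-liner (hypothetical helper, not in the driver):

#include <stdbool.h>

/* Coalescing pair is valid iff both thresholds are off or both are on. */
static bool coalesce_pair_valid(unsigned int frames, unsigned int usecs)
{
	return (frames == 0) == (usecs == 0);
}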
+
+static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0;
+ wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0;
+}
+
+static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ bool wol_enable = !!(wol->wolopts & WAKE_PHY);
+ int ret;
+
+ if (wol->wolopts & ~WAKE_PHY)
+ return -EINVAL;
+
+ if (wol_enable == device_may_wakeup(cdev->dev))
+ return 0;
+
+ ret = device_set_wakeup_enable(cdev->dev, wol_enable);
+ if (ret) {
+ netdev_err(cdev->net, "Failed to set wakeup enable %pE\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) {
+ if (wol_enable)
+ ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup);
+ else
+ ret = pinctrl_pm_select_default_state(cdev->dev);
+
+ if (ret) {
+ netdev_err(cdev->net, "Failed to select pinctrl state %pE\n",
+ ERR_PTR(ret));
+ goto err_wakeup_enable;
+ }
+ }
+
+ return 0;
+
+err_wakeup_enable:
+ /* Revert wakeup enable */
+ device_set_wakeup_enable(cdev->dev, !wol_enable);
+
+ return ret;
+}
+
+static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
+};
+
+static const struct ethtool_ops m_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = m_can_get_wol,
+ .set_wol = m_can_set_wol,
+};
+
+static int register_m_can_dev(struct m_can_classdev *cdev)
+{
+ struct net_device *dev = cdev->net;
+
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
+ if (dev->irq && cdev->is_peripheral)
+ dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
+int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
+{
+ u32 total_size;
+
+ total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+ if (total_size > mram_max_size) {
+ netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n",
+ total_size, mram_max_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(m_can_check_mram_cfg);
+
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
const u32 *mram_config_vals)
{
@@ -1870,38 +2383,16 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
FIELD_MAX(TXBC_NDTB_MASK);
- dev_dbg(cdev->dev,
- "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
- cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
- cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
- cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
- cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
- cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
- cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
-}
-
-int m_can_init_ram(struct m_can_classdev *cdev)
-{
- int end, i, start;
- int err = 0;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = cdev->mcfg[MRAM_SIDF].off;
- end = cdev->mcfg[MRAM_TXB].off +
- cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
-
- for (i = start; i < end; i += 4) {
- err = m_can_fifo_write_no_off(cdev, i, 0x0);
- if (err)
- break;
- }
-
- return err;
+ netdev_dbg(cdev->net,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-EXPORT_SYMBOL_GPL(m_can_init_ram);
int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
@@ -1910,8 +2401,8 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->hclk = devm_clk_get(cdev->dev, "hclk");
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
- if (IS_ERR(cdev->cclk)) {
- dev_err(cdev->dev, "no clock found\n");
+ if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
+ netdev_err(cdev->net, "no clock found\n");
ret = -ENODEV;
}
@@ -1919,6 +2410,42 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
+static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev)
+{
+ return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup;
+}
+
+static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev)
+{
+ struct device *dev = class_dev->dev;
+ int ret;
+
+ class_dev->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(class_dev->pinctrl)) {
+ ret = PTR_ERR(class_dev->pinctrl);
+ class_dev->pinctrl = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to get pinctrl\n");
+ }
+
+ class_dev->pinctrl_state_wakeup =
+ pinctrl_lookup_state(class_dev->pinctrl, "wakeup");
+ if (IS_ERR(class_dev->pinctrl_state_wakeup)) {
+ ret = PTR_ERR(class_dev->pinctrl_state_wakeup);
+ class_dev->pinctrl_state_wakeup = NULL;
+
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n");
+ }
+
+ return 0;
+}
+
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
int sizeof_priv)
{
@@ -1934,9 +2461,12 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
sizeof(mram_config_vals) / 4);
if (ret) {
dev_err(dev, "Could not get Message RAM configuration.");
- goto out;
+ return ERR_PTR(ret);
}
+ if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source"))
+ device_set_wakeup_capable(dev, true);
+
/* Get TX FIFO size
* Defines the total amount of echo buffers for loopback
*/
@@ -1946,7 +2476,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
if (!net_dev) {
dev_err(dev, "Failed to allocate CAN device");
- goto out;
+ return ERR_PTR(-ENOMEM);
}
class_dev = netdev_priv(net_dev);
@@ -1955,8 +2485,17 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
-out:
+ spin_lock_init(&class_dev->tx_handling_spinlock);
+
+ ret = m_can_class_parse_pinctrl(class_dev);
+ if (ret)
+ goto err_free_candev;
+
return class_dev;
+
+err_free_candev:
+ free_candev(net_dev);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
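With the old out: label gone, the allocator reports failures through ERR_PTR() instead of a bare NULL, so callers see the real errno. A caller-side sketch of the convention (example_probe is illustrative; the glue drivers below are updated to exactly this shape):

static int example_probe(struct device *dev)
{
	struct m_can_classdev *mcan_class;

	mcan_class = m_can_class_allocate_dev(dev, 0);
	if (IS_ERR(mcan_class))
		return PTR_ERR(mcan_class);	/* real errno, not just -ENOMEM */

	return 0;
}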
@@ -1970,40 +2509,67 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops)
+ return -ENOMEM;
}
+ cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL);
+ if (IS_ERR(cdev->rst))
+ return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst),
+ "Failed to get reset line\n");
+
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(cdev->rst);
+ if (ret)
+ goto clk_disable;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
- M_CAN_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
if (ret)
- goto clk_disable;
+ goto out_reset_control_assert;
+ }
+
+ if (!cdev->net->irq) {
+		netdev_dbg(cdev->net, "Polling enabled, initializing hrtimer");
+ hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
if (ret)
goto rx_offload_del;
- ret = register_m_can_dev(cdev->net);
+ ret = register_m_can_dev(cdev);
if (ret) {
- dev_err(cdev->dev, "registering %s failed (err=%d)\n",
- cdev->net->name, ret);
+ netdev_err(cdev->net, "registering %s failed (err=%d)\n",
+ cdev->net->name, ret);
goto rx_offload_del;
}
- devm_can_led_init(cdev->net);
-
of_can_transceiver(cdev->net);
- dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, cdev->net->irq, cdev->version);
+ netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n",
+ cdev->net->irq, cdev->version);
/* Probe finished
- * Stop clocks. They will be reactivated once the M_CAN device is opened
+ * Assert reset and stop clocks.
+ * They will be reactivated once the M_CAN device is opened
*/
+ reset_control_assert(cdev->rst);
m_can_clk_stop(cdev);
return 0;
@@ -2011,6 +2577,8 @@ int m_can_class_register(struct m_can_classdev *cdev)
rx_offload_del:
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
+out_reset_control_assert:
+ reset_control_assert(cdev->rst);
clk_disable:
m_can_clk_stop(cdev);
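The unwind labels above release resources in reverse order of acquisition: the reset line is re-asserted before the clocks feeding the block are stopped, mirroring deassert-after-clock-start on the way up. A self-contained sketch of the ordering (example_power_up is illustrative):

#include <linux/clk.h>
#include <linux/reset.h>

static int example_power_up(struct clk *clk, struct reset_control *rst)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = reset_control_deassert(rst);
	if (ret)
		goto err_clk_off;

	return 0;

err_clk_off:
	/* undo in reverse order of acquisition */
	clk_disable_unprepare(clk);
	return ret;
}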
@@ -2020,9 +2588,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
@@ -2030,19 +2598,34 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+		/* Leave the chip running with the RX interrupt enabled if it
+		 * is used as a wake-up source. Coalescing then needs to be
+		 * reset: the timer is cancelled here, and the interrupts are
+		 * restored in resume.
+ */
+ if (cdev->pm_wake_source) {
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
+ } else {
+ m_can_stop(ndev);
+ }
+
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
- pinctrl_pm_select_sleep_state(dev);
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_sleep_state(dev);
- cdev->can.state = CAN_STATE_SLEEPING;
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
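When the controller is a wake-up source, suspend keeps it running with only the RX-FIFO-0 new-message interrupt armed and cancels the coalescing timer; otherwise the core is stopped completely. A compressed, hypothetical sketch of that split (my_candev and the register names are stand-ins, not the driver's symbols):

#include <linux/bits.h>
#include <linux/hrtimer.h>
#include <linux/io.h>

struct my_candev {
	bool wake_source;
	struct hrtimer coalesce_timer;
	void __iomem *regs;
};

#define MY_REG_IE	0x054
#define MY_IR_RX_NEW	BIT(0)

static void example_suspend(struct my_candev *md)
{
	if (md->wake_source) {
		/* keep running; only the RX interrupt may fire and wake us */
		hrtimer_cancel(&md->coalesce_timer);
		writel(MY_IR_RX_NEW, md->regs + MY_REG_IE);
	} else {
		/* full stop (simplified): mask everything */
		writel(0, md->regs + MY_REG_IE);
	}
}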
@@ -2050,29 +2633,48 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
- pinctrl_pm_select_default_state(dev);
-
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (!m_can_class_wakeup_pinctrl_enabled(cdev))
+ pinctrl_pm_select_default_state(dev);
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- m_can_init_ram(cdev);
- m_can_start(ndev);
+ if (cdev->pm_wake_source) {
+			/* Restore the active interrupts but disable coalescing,
+			 * as we may have missed important watermark interrupts
+			 * between suspend and resume. The timers were already
+			 * stopped in suspend; here we enable all interrupts
+			 * again.
+ */
+ cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
+ }
+
netif_device_attach(ndev);
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index d18b515e6ccc..4743342b2fba 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -7,28 +7,27 @@
#define _CAN_M_CAN_H_
#include <linux/can/core.h>
-#include <linux/can/led.h>
+#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/freezer.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
-#include <linux/can/dev.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -39,7 +38,7 @@ enum m_can_lec_type {
LEC_BIT1_ERROR,
LEC_BIT0_ERROR,
LEC_CRC_ERROR,
- LEC_UNUSED,
+ LEC_NO_CHANGE,
};
enum m_can_mram_cfg {
@@ -69,6 +68,14 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
+};
+
+struct m_can_tx_op {
+ struct m_can_classdev *cdev;
+ struct work_struct work;
+ struct sk_buff *skb;
+ bool submit;
};
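Each TX operation now carries its own work_struct and skb, so slow peripheral (SPI) writes for several FIFO slots can be in flight on the workqueue at once; the submit flag marks the op that finally rings the doorbell for the batch. A sketch of the worker shape, with the two bus helpers stubbed out as assumptions:

static void example_push_skb(struct m_can_classdev *cdev,
			     struct sk_buff *skb) { /* bus write */ }
static void example_submit(struct m_can_classdev *cdev) { /* doorbell */ }

static void example_tx_work(struct work_struct *work)
{
	struct m_can_tx_op *op = container_of(work, struct m_can_tx_op,
					      work);

	example_push_skb(op->cdev, op->skb);	/* copy frame over the bus */
	if (op->submit)
		example_submit(op->cdev);	/* one doorbell per batch */
}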
struct m_can_classdev {
@@ -79,21 +86,52 @@ struct m_can_classdev {
struct device *dev;
struct clk *hclk;
struct clk *cclk;
+ struct reset_control *rst;
struct workqueue_struct *tx_wq;
- struct work_struct tx_work;
- struct sk_buff *tx_skb;
struct phy *transceiver;
- struct m_can_ops *ops;
+ ktime_t irq_timer_wait;
+
+ const struct m_can_ops *ops;
int version;
u32 irqstatus;
int pm_clock_support;
+ int pm_wake_source;
int is_peripheral;
+ bool irq_edge_triggered;
+
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+ u32 rx_max_coalesced_frames_irq;
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames_irq;
+ u32 tx_coalesce_usecs_irq;
+
+ // Store this internally to avoid fetch delays on peripheral chips
+ u32 tx_fifo_putidx;
+
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
+ int tx_fifo_in_flight;
+
+ struct m_can_tx_op *tx_ops;
+ int tx_fifo_size;
+ int next_tx_op;
+
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
struct mram_cfg mcfg[MRAM_CFG_NUM];
+
+ struct hrtimer hrtimer;
+
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_state_wakeup;
};
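tx_handling_spinlock exists because ndo_start_xmit and the ISR both touch tx_fifo_in_flight. A minimal sketch of the reservation it protects, using the fields declared above (reserve_tx_slot itself is illustrative):

static bool reserve_tx_slot(struct m_can_classdev *cdev)
{
	unsigned long flags;
	bool ok;

	spin_lock_irqsave(&cdev->tx_handling_spinlock, flags);
	ok = cdev->tx_fifo_in_flight < cdev->tx_fifo_size;
	if (ok)
		cdev->tx_fifo_in_flight++;	/* ISR decrements on completion */
	spin_unlock_irqrestore(&cdev->tx_handling_spinlock, flags);

	return ok;
}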
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv);
@@ -101,7 +139,7 @@ void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
-int m_can_init_ram(struct m_can_classdev *priv);
+int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size);
int m_can_class_suspend(struct device *dev);
int m_can_class_resume(struct device *dev);
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 89cc3d41e952..eb31ed1f9644 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,7 +18,7 @@
#define M_CAN_PCI_MMIO_BAR 0
-#define M_CAN_CLOCK_FREQ_EHL 100000000
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
struct m_can_pci_priv {
@@ -42,8 +42,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->base + offset;
- ioread32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -61,13 +66,18 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
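ioread32_rep() implements FIFO semantics: it reads the same MMIO address val_count times. The M_CAN Message RAM behind this BAR is linear memory, so each word needs its own offset, which is what the explicit loops above restore. The linear variant in isolation (read_linear_ram is illustrative):

#include <linux/io.h>

static void read_linear_ram(void __iomem *ram, u32 *buf, size_t words)
{
	/* unlike ioread32_rep(), which re-reads one FIFO address,
	 * step through the RAM one 32-bit word at a time
	 */
	while (words--) {
		*buf++ = ioread32(ram);
		ram += 4;
	}
}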
-static struct m_can_ops m_can_pci_ops = {
+static const struct m_can_ops m_can_pci_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
@@ -101,8 +111,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class = m_can_class_allocate_dev(&pci->dev,
sizeof(struct m_can_pci_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -110,19 +120,21 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- return ret;
+ goto err_free_dev;
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->irq_edge_triggered = true;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
ret = m_can_class_register(mcan_class);
if (ret)
- goto err;
+ goto err_free_irq;
/* Enable interrupt control at CAN wrapper IP */
writel(0x1, base + CTL_CSR_INT_CTL_OFFSET);
@@ -134,8 +146,10 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
-err:
+err_free_irq:
pci_free_irq_vectors(pci);
+err_free_dev:
+ m_can_class_free_dev(mcan_class->net);
return ret;
}
@@ -151,6 +165,7 @@ static void m_can_pci_remove(struct pci_dev *pci)
writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET);
m_can_class_unregister(mcan_class);
+ m_can_class_free_dev(mcan_class->net);
pci_free_irq_vectors(pci);
}
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index eee47bad0592..56da411878af 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -1,12 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// IOMapped CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
-#include <linux/platform_device.h>
+#include <linux/hrtimer.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include "m_can.h"
@@ -67,7 +68,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_plat_ops = {
+static const struct m_can_ops m_can_plat_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
@@ -82,12 +83,12 @@ static int m_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
void __iomem *mram_addr;
struct phy *transceiver;
- int irq, ret = 0;
+ int irq = 0, ret = 0;
mcan_class = m_can_class_allocate_dev(&pdev->dev,
sizeof(struct m_can_plat_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
priv = cdev_to_priv(mcan_class);
@@ -96,12 +97,20 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto probe_fail;
addr = devm_platform_ioremap_resource_byname(pdev, "m_can");
- irq = platform_get_irq_byname(pdev, "int0");
- if (IS_ERR(addr) || irq < 0) {
- ret = -EINVAL;
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto probe_fail;
}
+ if (device_property_present(mcan_class->dev, "interrupts") ||
+ device_property_present(mcan_class->dev, "interrupt-names")) {
+ irq = platform_get_irq_byname(pdev, "int0");
+ if (irq < 0) {
+ ret = irq;
+ goto probe_fail;
+ }
+ }
+
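Making "int0" optional lets boards without a usable interrupt line fall back to the hrtimer polling mode that m_can_class_register() now sets up when net->irq is zero. A sketch of what such a polling callback looks like (my_priv and my_isr are hypothetical stand-ins for the driver's private struct and shared handler):

struct my_priv {
	struct net_device *ndev;
	struct hrtimer hrtimer;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stand-in for the real handler */
}

static enum hrtimer_restart example_poll(struct hrtimer *timer)
{
	struct my_priv *priv = container_of(timer, struct my_priv, hrtimer);

	my_isr(0, priv->ndev);				/* same work as the hard IRQ */
	hrtimer_forward_now(timer, ms_to_ktime(1));	/* ~1 ms poll period */

	return HRTIMER_RESTART;
}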
/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
@@ -130,6 +139,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->net->irq = irq;
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
mcan_class->transceiver = transceiver;
@@ -140,10 +150,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mcan_class);
- ret = m_can_init_ram(mcan_class);
- if (ret)
- goto probe_fail;
-
pm_runtime_enable(mcan_class->dev);
ret = m_can_class_register(mcan_class);
if (ret)
@@ -168,16 +174,14 @@ static __maybe_unused int m_can_resume(struct device *dev)
return m_can_class_resume(dev);
}
-static int m_can_plat_remove(struct platform_device *pdev)
+static void m_can_plat_remove(struct platform_device *pdev)
{
struct m_can_plat_priv *priv = platform_get_drvdata(pdev);
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
-
+ pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
-
- return 0;
}
static int __maybe_unused m_can_runtime_suspend(struct device *dev)
@@ -232,7 +236,7 @@ static struct platform_driver m_can_plat_driver = {
module_platform_driver(m_can_plat_driver);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 04687b15b250..31cc9d0abd45 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -6,11 +6,12 @@
#define TCAN4X5X_EXT_CLK_DEF 40000000
-#define TCAN4X5X_DEV_ID0 0x00
-#define TCAN4X5X_DEV_ID1 0x04
+#define TCAN4X5X_DEV_ID1 0x00
+#define TCAN4X5X_DEV_ID1_TCAN 0x4e414354 /* ASCII TCAN */
+#define TCAN4X5X_DEV_ID2 0x04
#define TCAN4X5X_REV 0x08
#define TCAN4X5X_STATUS 0x0C
-#define TCAN4X5X_ERROR_STATUS 0x10
+#define TCAN4X5X_ERROR_STATUS_MASK 0x10
#define TCAN4X5X_CONTROL 0x14
#define TCAN4X5X_CONFIG 0x800
@@ -80,6 +81,7 @@
TCAN4X5X_MCAN_IR_RF1F)
#define TCAN4X5X_MRAM_START 0x8000
+#define TCAN4X5X_MRAM_SIZE 0x800
#define TCAN4X5X_MCAN_OFFSET 0x1000
#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
@@ -90,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -102,6 +106,37 @@
#define TCAN4X5X_WD_3_S_TIMER BIT(29)
#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
+struct tcan4x5x_version_info {
+ const char *name;
+ u32 id2_register;
+
+ bool has_wake_pin;
+ bool has_state_pin;
+};
+
+enum {
+ TCAN4552 = 0,
+ TCAN4553,
+ TCAN4X5X,
+};
+
+static const struct tcan4x5x_version_info tcan4x5x_versions[] = {
+ [TCAN4552] = {
+ .name = "4552",
+ .id2_register = 0x32353534,
+ },
+ [TCAN4553] = {
+ .name = "4553",
+ .id2_register = 0x33353534,
+ },
+	/* generic fallback without an id2_register; must stay last */
+ [TCAN4X5X] = {
+ .name = "generic",
+ .has_wake_pin = true,
+ .has_state_pin = true,
+ },
+};
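The ID registers hold ASCII with the least significant byte first: DEVID1 reads 0x4e414354 ('T','C','A','N') on all parts, while DEVID2 distinguishes the variants, e.g. 0x32353534 decodes to "4552". A small decode sketch (id_to_str is illustrative):

static void id_to_str(u32 id, char out[5])
{
	out[0] = id & 0xff;		/* 0x54 -> 'T' for DEVID1 */
	out[1] = (id >> 8) & 0xff;
	out[2] = (id >> 16) & 0xff;
	out[3] = (id >> 24) & 0xff;
	out[4] = '\0';
}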
+
static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
return container_of(cdev, struct tcan4x5x_priv, cdev);
@@ -204,17 +239,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
if (ret)
return ret;
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
- TCAN4X5X_ENABLE_MCAN_INT);
- if (ret)
- return ret;
-
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
- TCAN4X5X_CLEAR_ALL_INT);
- if (ret)
- return ret;
-
- return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
+ return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
TCAN4X5X_CLEAR_ALL_INT);
}
@@ -234,8 +259,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
- /* Zero out the MCAN buffers */
- ret = m_can_init_ram(cdev);
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
+ TCAN4X5X_CLEAR_ALL_INT);
if (ret)
return ret;
@@ -244,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+}
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -263,18 +303,59 @@ static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
TCAN4X5X_DISABLE_INH_MSK, 0x01);
}
+static const struct tcan4x5x_version_info
+*tcan4x5x_find_version(struct tcan4x5x_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID1, &val);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (val != TCAN4X5X_DEV_ID1_TCAN) {
+ dev_err(&priv->spi->dev, "Not a tcan device %x\n", val);
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID2, &val);
+ if (ret)
+ return ERR_PTR(ret);
+
+ for (int i = 0; i != ARRAY_SIZE(tcan4x5x_versions); ++i) {
+ const struct tcan4x5x_version_info *vinfo = &tcan4x5x_versions[i];
+
+ if (!vinfo->id2_register || val == vinfo->id2_register) {
+ dev_info(&priv->spi->dev, "Detected TCAN device version %s\n",
+ vinfo->name);
+ return vinfo;
+ }
+ }
+
+ return &tcan4x5x_versions[TCAN4X5X];
+}
+
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
int ret;
- tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
- GPIOD_OUT_HIGH);
+ tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-wake",
+ GPIOD_OUT_HIGH);
if (IS_ERR(tcan4x5x->device_wake_gpio)) {
if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- tcan4x5x_disable_wake(cdev);
+ tcan4x5x->device_wake_gpio = NULL;
}
tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -289,16 +370,36 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
"device-state",
GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio)) {
+ if (IS_ERR(tcan4x5x->device_state_gpio))
tcan4x5x->device_state_gpio = NULL;
- tcan4x5x_disable_state(cdev);
+
+ return 0;
+}
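devm_gpiod_get_optional() returns NULL when the firmware names no pin and an ERR_PTR on real failures; the code above propagates only -EPROBE_DEFER and treats every other error as "pin absent", deferring the consequences to tcan4x5x_check_gpios(). The pattern in isolation (example_get_wake_pin is illustrative):

static int example_get_wake_pin(struct device *dev,
				struct gpio_desc **out)
{
	struct gpio_desc *d;

	d = devm_gpiod_get_optional(dev, "device-wake", GPIOD_OUT_HIGH);
	if (IS_ERR(d)) {
		if (PTR_ERR(d) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		d = NULL;	/* degrade to "no wake pin" */
	}

	*out = d;
	return 0;
}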
+
+static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
+ const struct tcan4x5x_version_info *version_info)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+ int ret;
+
+ if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
+ ret = tcan4x5x_disable_wake(cdev);
+ if (ret)
+ return ret;
+ }
+
+ if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
+ ret = tcan4x5x_disable_state(cdev);
+ if (ret)
+ return ret;
}
return 0;
}
-static struct m_can_ops tcan4x5x_ops = {
+static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -308,26 +409,32 @@ static struct m_can_ops tcan4x5x_ops = {
static int tcan4x5x_can_probe(struct spi_device *spi)
{
+ const struct tcan4x5x_version_info *version_info;
struct tcan4x5x_priv *priv;
struct m_can_classdev *mcan_class;
int freq, ret;
mcan_class = m_can_class_allocate_dev(&spi->dev,
sizeof(struct tcan4x5x_priv));
- if (!mcan_class)
- return -ENOMEM;
+ if (IS_ERR(mcan_class))
+ return PTR_ERR(mcan_class);
+
+ ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE);
+ if (ret)
+ goto out_m_can_class_free_dev;
priv = cdev_to_priv(mcan_class);
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_m_can_class_free_dev;
- } else {
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ }
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -337,6 +444,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
/* Sanity check */
if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
+		dev_err(&spi->dev, "Clock frequency %d is out of the supported range\n",
+			freq);
ret = -ERANGE;
goto out_m_can_class_free_dev;
}
@@ -344,6 +453,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->spi = spi;
mcan_class->pm_clock_support = 0;
+ mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source");
mcan_class->can.clock.freq = freq;
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
@@ -355,28 +465,67 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
/* Configure the SPI bus */
spi->bits_per_word = 8;
ret = spi_setup(spi);
- if (ret)
+ if (ret) {
+ dev_err(&spi->dev, "SPI setup failed %pe\n", ERR_PTR(ret));
goto out_m_can_class_free_dev;
+ }
ret = tcan4x5x_regmap_init(priv);
- if (ret)
+ if (ret) {
+ dev_err(&spi->dev, "regmap init failed %pe\n", ERR_PTR(ret));
goto out_m_can_class_free_dev;
+ }
ret = tcan4x5x_power_enable(priv->power, 1);
- if (ret)
+ if (ret) {
+ dev_err(&spi->dev, "Enabling regulator failed %pe\n",
+ ERR_PTR(ret));
goto out_m_can_class_free_dev;
+ }
ret = tcan4x5x_get_gpios(mcan_class);
- if (ret)
+ if (ret) {
+ dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
goto out_power;
+ }
- ret = tcan4x5x_init(mcan_class);
- if (ret)
+ version_info = tcan4x5x_find_version(priv);
+ if (IS_ERR(version_info)) {
+ ret = PTR_ERR(version_info);
+ goto out_power;
+ }
+
+ ret = tcan4x5x_check_gpios(mcan_class, version_info);
+ if (ret) {
+ dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
goto out_power;
+ }
+
+ tcan4x5x_get_dt_data(mcan_class);
+
+ tcan4x5x_check_wake(priv);
+
+ ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
+ if (ret) {
+ dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
+ ret = tcan4x5x_clear_interrupts(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
+ if (mcan_class->pm_wake_source)
+ device_init_wakeup(&spi->dev, true);
ret = m_can_class_register(mcan_class);
- if (ret)
+ if (ret) {
+ dev_err(&spi->dev, "Failed registering m_can device %pe\n",
+ ERR_PTR(ret));
goto out_power;
+ }
netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n");
return 0;
@@ -388,7 +537,7 @@ out_power:
return ret;
}
-static int tcan4x5x_can_remove(struct spi_device *spi)
+static void tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
@@ -397,8 +546,29 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
tcan4x5x_power_enable(priv->power, 0);
m_can_class_free_dev(priv->cdev.net);
+}
- return 0;
+static int __maybe_unused tcan4x5x_suspend(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (cdev->pm_wake_source)
+ enable_irq_wake(spi->irq);
+
+ return m_can_class_suspend(dev);
+}
+
+static int __maybe_unused tcan4x5x_resume(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret = m_can_class_resume(dev);
+
+ if (cdev->pm_wake_source)
+ disable_irq_wake(spi->irq);
+
+ return ret;
}
static const struct of_device_id tcan4x5x_of_match[] = {
@@ -419,11 +589,15 @@ static const struct spi_device_id tcan4x5x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+static const struct dev_pm_ops tcan4x5x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume)
+};
+
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
- .pm = NULL,
+ .pm = &tcan4x5x_pm_ops,
},
.id_table = tcan4x5x_id_table,
.probe = tcan4x5x_can_probe,
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..fafa6daa67e6 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
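The new bound follows from the MRAM window: it starts at 0x8000 and spans TCAN4X5X_MRAM_SIZE (0x800) bytes, so the last addressable 32-bit register is 0x8000 + 0x800 - 4 = 0x87fc. The old value 0x8ffc assumed a 4 KiB window and pointed 2 KiB past the end of the actual RAM.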
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
@@ -90,16 +90,46 @@ static int tcan4x5x_regmap_read(void *context,
return 0;
}
-static const struct regmap_range tcan4x5x_reg_table_yes_range[] = {
- regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */
- regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags*/
+static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
+ /* Device ID and SPI Registers */
+ regmap_reg_range(0x000c, 0x0010),
+	/* Device configuration registers and Interrupt Flags */
+ regmap_reg_range(0x0800, 0x080c),
+ regmap_reg_range(0x0820, 0x0820),
+ regmap_reg_range(0x0830, 0x0830),
+ /* M_CAN */
+ regmap_reg_range(0x100c, 0x102c),
+ regmap_reg_range(0x1048, 0x1048),
+ regmap_reg_range(0x1050, 0x105c),
+ regmap_reg_range(0x1080, 0x1088),
+ regmap_reg_range(0x1090, 0x1090),
+ regmap_reg_range(0x1098, 0x10a0),
+ regmap_reg_range(0x10a8, 0x10b0),
+ regmap_reg_range(0x10b8, 0x10c0),
+ regmap_reg_range(0x10c8, 0x10c8),
+ regmap_reg_range(0x10d0, 0x10d4),
+ regmap_reg_range(0x10e0, 0x10e4),
+ regmap_reg_range(0x10f0, 0x10f0),
+ regmap_reg_range(0x10f8, 0x10f8),
+ /* MRAM */
+ regmap_reg_range(0x8000, 0x87fc),
+};
+
+static const struct regmap_range tcan4x5x_reg_table_rd_range[] = {
+ regmap_reg_range(0x0000, 0x0010), /* Device ID and SPI Registers */
+	regmap_reg_range(0x0800, 0x0830), /* Device configuration registers and Interrupt Flags */
regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
regmap_reg_range(0x8000, 0x87fc), /* MRAM */
};
-static const struct regmap_access_table tcan4x5x_reg_table = {
- .yes_ranges = tcan4x5x_reg_table_yes_range,
- .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range),
+static const struct regmap_access_table tcan4x5x_reg_table_wr = {
+ .yes_ranges = tcan4x5x_reg_table_wr_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range),
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table_rd = {
+ .yes_ranges = tcan4x5x_reg_table_rd_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range),
};
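Splitting the single yes-range table into separate read and write tables means regmap itself now rejects writes to read-only ID/status registers and reads of write-only ones, instead of allowing both everywhere. The minimal shape of such a table, with example names:

static const struct regmap_range example_wr_range[] = {
	regmap_reg_range(0x000c, 0x0010),	/* writable SPI registers */
};

static const struct regmap_access_table example_wr_table = {
	.yes_ranges = example_wr_range,
	.n_yes_ranges = ARRAY_SIZE(example_wr_range),
};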
static const struct regmap_config tcan4x5x_regmap = {
@@ -107,8 +137,8 @@ static const struct regmap_config tcan4x5x_regmap = {
.reg_stride = 4,
.pad_bits = 8,
.val_bits = 32,
- .wr_table = &tcan4x5x_reg_table,
- .rd_table = &tcan4x5x_reg_table,
+ .wr_table = &tcan4x5x_reg_table_wr,
+ .rd_table = &tcan4x5x_reg_table_rd,
.max_register = TCAN4X5X_MAX_REGISTER,
.cache_type = REGCACHE_NONE,
.read_flag_mask = (__force unsigned long)
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..0080c39ee182 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -12,8 +12,12 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
@@ -61,7 +65,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
@@ -288,7 +292,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
int irq, mscan_clksrc = 0;
int err = -ENOMEM;
- data = of_device_get_match_data(&ofdev->dev);
+ data = device_get_match_data(&ofdev->dev);
if (!data)
return -EINVAL;
@@ -320,14 +324,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
- goto exit_free_mscan;
+ goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_free_mscan;
+ goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -335,7 +339,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return 0;
-exit_free_mscan:
+exit_put_clock:
+ if (data->put_clock)
+ data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);
@@ -345,15 +351,13 @@ exit_unmap_mem:
return err;
}
-static int mpc5xxx_can_remove(struct platform_device *ofdev)
+static void mpc5xxx_can_remove(struct platform_device *ofdev)
{
- const struct of_device_id *match;
const struct mpc5xxx_can_data *data;
struct net_device *dev = platform_get_drvdata(ofdev);
struct mscan_priv *priv = netdev_priv(dev);
- match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
- data = match ? match->data : NULL;
+ data = device_get_match_data(&ofdev->dev);
unregister_mscandev(dev);
if (data && data->put_clock)
@@ -361,8 +365,6 @@ static int mpc5xxx_can_remove(struct platform_device *ofdev)
iounmap(priv->reg_base);
irq_dispose_mapping(dev->irq);
free_candev(dev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index fa32e418eb29..39c7aa2a0b2f 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = {
.brp_inc = 1,
};
-struct mscan_state {
- u8 mode;
- u8 canrier;
- u8 cantier;
-};
-
static enum can_state state_map[] = {
CAN_STATE_ERROR_ACTIVE,
CAN_STATE_ERROR_WARNING,
@@ -191,7 +185,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -401,13 +395,15 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
continue;
}
- if (canrflg & MSCAN_RXF)
+ if (canrflg & MSCAN_RXF) {
mscan_get_rx_frame(dev, frame);
- else if (canrflg & MSCAN_ERR_IF)
+ stats->rx_packets++;
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += frame->len;
+ } else if (canrflg & MSCAN_ERR_IF) {
mscan_get_err_frame(dev, frame, canrflg);
+ }
- stats->rx_packets++;
- stats->rx_bytes += frame->len;
work_done++;
netif_receive_skb(skb);
}
@@ -446,9 +442,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
continue;
out_8(&regs->cantbsel, mask);
- stats->tx_bytes += in_8(&regs->tx.dlr);
+ stats->tx_bytes += can_get_echo_skb(dev, entry->id,
+ NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
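can_get_echo_skb() returns the payload length of the echoed frame, so TX byte accounting no longer needs the extra read of the DLC register (and gets RTR frames right for free). Usage sketch (count_tx_done is illustrative):

static void count_tx_done(struct net_device *dev, unsigned int idx)
{
	struct net_device_stats *stats = &dev->stats;

	stats->tx_bytes += can_get_echo_skb(dev, idx, NULL);
	stats->tx_packets++;
}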
@@ -611,7 +607,10 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_open = mscan_open,
.ndo_stop = mscan_close,
.ndo_start_xmit = mscan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops mscan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
int register_mscandev(struct net_device *dev, int mscan_clksrc)
@@ -674,10 +673,11 @@ struct net_device *alloc_mscandev(void)
priv = netdev_priv(dev);
dev->netdev_ops = &mscan_netdev_ops;
+ dev->ethtool_ops = &mscan_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
- netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
+ netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
deleted file mode 100644
index 92a54a5fd4c5..000000000000
--- a/drivers/net/can/pch_can.c
+++ /dev/null
@@ -1,1247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
- */
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
-#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
-#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
-#define PCH_CTRL_CCE BIT(6)
-#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
-#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
-#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
-
-#define PCH_CMASK_RX_TX_SET 0x00f3
-#define PCH_CMASK_RX_TX_GET 0x0073
-#define PCH_CMASK_ALL 0xff
-#define PCH_CMASK_NEWDAT BIT(2)
-#define PCH_CMASK_CLRINTPND BIT(3)
-#define PCH_CMASK_CTRL BIT(4)
-#define PCH_CMASK_ARB BIT(5)
-#define PCH_CMASK_MASK BIT(6)
-#define PCH_CMASK_RDWR BIT(7)
-#define PCH_IF_MCONT_NEWDAT BIT(15)
-#define PCH_IF_MCONT_MSGLOST BIT(14)
-#define PCH_IF_MCONT_INTPND BIT(13)
-#define PCH_IF_MCONT_UMASK BIT(12)
-#define PCH_IF_MCONT_TXIE BIT(11)
-#define PCH_IF_MCONT_RXIE BIT(10)
-#define PCH_IF_MCONT_RMTEN BIT(9)
-#define PCH_IF_MCONT_TXRQXT BIT(8)
-#define PCH_IF_MCONT_EOB BIT(7)
-#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
-#define PCH_ID2_DIR BIT(13)
-#define PCH_ID2_XTD BIT(14)
-#define PCH_ID_MSGVAL BIT(15)
-#define PCH_IF_CREQ_BUSY BIT(15)
-
-#define PCH_STATUS_INT 0x8000
-#define PCH_RP 0x00008000
-#define PCH_REC 0x00007f00
-#define PCH_TEC 0x000000ff
-
-#define PCH_TX_OK BIT(3)
-#define PCH_RX_OK BIT(4)
-#define PCH_EPASSIV BIT(5)
-#define PCH_EWARN BIT(6)
-#define PCH_BUS_OFF BIT(7)
-
-/* bit position of certain controller bits. */
-#define PCH_BIT_BRP_SHIFT 0
-#define PCH_BIT_SJW_SHIFT 6
-#define PCH_BIT_TSEG1_SHIFT 8
-#define PCH_BIT_TSEG2_SHIFT 12
-#define PCH_BIT_BRPE_BRPE_SHIFT 6
-
-#define PCH_MSK_BITT_BRP 0x3f
-#define PCH_MSK_BRPE_BRPE 0x3c0
-#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
-#define PCH_COUNTER_LIMIT 10
-
-#define PCH_CAN_CLK 50000000 /* 50MHz */
-
-/*
- * Define the number of message object.
- * PCH CAN communications are done via Message RAM.
- * The Message RAM consists of 32 message objects.
- */
-#define PCH_RX_OBJ_NUM 26
-#define PCH_TX_OBJ_NUM 6
-#define PCH_RX_OBJ_START 1
-#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
-#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
-#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
-
-#define PCH_FIFO_THRESH 16
-
-/* TxRqst2 show status of MsgObjNo.17~32 */
-#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
- (PCH_RX_OBJ_END - 16))
-
-enum pch_ifreg {
- PCH_RX_IFREG,
- PCH_TX_IFREG,
-};
-
-enum pch_can_err {
- PCH_STUF_ERR = 1,
- PCH_FORM_ERR,
- PCH_ACK_ERR,
- PCH_BIT1_ERR,
- PCH_BIT0_ERR,
- PCH_CRC_ERR,
- PCH_LEC_ALL,
-};
-
-enum pch_can_mode {
- PCH_CAN_ENABLE,
- PCH_CAN_DISABLE,
- PCH_CAN_ALL,
- PCH_CAN_NONE,
- PCH_CAN_STOP,
- PCH_CAN_RUN,
-};
-
-struct pch_can_if_regs {
- u32 creq;
- u32 cmask;
- u32 mask1;
- u32 mask2;
- u32 id1;
- u32 id2;
- u32 mcont;
- u32 data[4];
- u32 rsv[13];
-};
-
-struct pch_can_regs {
- u32 cont;
- u32 stat;
- u32 errc;
- u32 bitt;
- u32 intr;
- u32 opt;
- u32 brpe;
- u32 reserve;
- struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
- u32 reserve1[8];
- u32 treq1;
- u32 treq2;
- u32 reserve2[6];
- u32 data1;
- u32 data2;
- u32 reserve3[6];
- u32 canipend1;
- u32 canipend2;
- u32 reserve4[6];
- u32 canmval1;
- u32 canmval2;
- u32 reserve5[37];
- u32 srst;
-};
-
-struct pch_can_priv {
- struct can_priv can;
- struct pci_dev *dev;
- u32 tx_enable[PCH_TX_OBJ_END];
- u32 rx_enable[PCH_TX_OBJ_END];
- u32 rx_link[PCH_TX_OBJ_END];
- u32 int_enables;
- struct net_device *ndev;
- struct pch_can_regs __iomem *regs;
- struct napi_struct napi;
- int tx_obj; /* Point next Tx Obj index */
- int use_msi;
-};
-
-static const struct can_bittiming_const pch_can_bittiming_const = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024, /* 6bit + extended 4bit */
- .brp_inc = 1,
-};
-
-static const struct pci_device_id pch_pci_tbl[] = {
- {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
- {0,}
-};
-MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
-
-static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) | mask, addr);
-}
-
-static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) & ~mask, addr);
-}
-
-static void pch_can_set_run_mode(struct pch_can_priv *priv,
- enum pch_can_mode mode)
-{
- switch (mode) {
- case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- default:
- netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
- break;
- }
-}
-
-static void pch_can_set_optmode(struct pch_can_priv *priv)
-{
- u32 reg_val = ioread32(&priv->regs->opt);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= PCH_OPT_SILENT;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= PCH_OPT_LBACK;
-
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
- iowrite32(reg_val, &priv->regs->opt);
-}
-
-static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
-{
- int counter = PCH_COUNTER_LIMIT;
- u32 ifx_creq;
-
- iowrite32(num, creq_addr);
- while (counter) {
- ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
- if (!ifx_creq)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
-}
-
-static void pch_can_set_int_enables(struct pch_can_priv *priv,
- enum pch_can_mode interrupt_no)
-{
- switch (interrupt_no) {
- case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
- break;
-
- case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- default:
- netdev_err(priv->ndev, "Invalid interrupt number.\n");
- break;
- }
-}
-
-static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
- int set, enum pch_ifreg dir)
-{
- u32 ie;
-
- if (dir)
- ie = PCH_IF_MCONT_TXIE;
- else
- ie = PCH_IF_MCONT_RXIE;
-
- /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[dir].cmask);
-
- if (set) {
- /* Setting the MsgVal and RxIE/TxIE bits */
- pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- } else {
- /* Clearing the MsgVal and RxIE/TxIE bits */
- pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- }
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-}
-
-static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as receivers. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
-}
-
-static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as transmit object. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
-}
-
-static u32 pch_can_int_pending(struct pch_can_priv *priv)
-{
- return ioread32(&priv->regs->intr) & 0xffff;
-}
-
-static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
-{
- int i; /* Msg Obj ID (1~32) */
-
- for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
- iowrite32(0x0, &priv->regs->ifregs[0].mcont);
- iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
- PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-}
-
-static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
-{
- int i;
-
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
-
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
-
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_UMASK);
-
- /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
- if (i == PCH_RX_OBJ_END)
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
-
- iowrite32(0, &priv->regs->ifregs[0].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
- 0x1fff | PCH_MASK2_MDIR_MXTD);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
-
- /* Resetting DIR bit for reception */
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
- iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
-
- /* Setting EOB bit for transmitter */
- iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
- &priv->regs->ifregs[1].mcont);
-
- iowrite32(0, &priv->regs->ifregs[1].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
- }
-}
-
-static void pch_can_init(struct pch_can_priv *priv)
-{
- /* Stopping the Can device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Clearing all the message object buffers. */
- pch_can_clear_if_buffers(priv);
-
- /* Configuring the respective message object as either rx/tx object. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Enabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-}
-
-static void pch_can_release(struct pch_can_priv *priv)
-{
-	/* Stopping the CAN device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Disabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
-
- /* Disabling all the receive object. */
- pch_can_set_rx_all(priv, 0);
-
- /* Disabling all the transmit object. */
- pch_can_set_tx_all(priv, 0);
-}
-
-/* This function clears interrupt(s) from the CAN device. */
-static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
-{
- /* Clear interrupt for transmit object */
- if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
- /* Setting CMASK for clearing the reception interrupts. */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
- } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
- /*
- * Setting CMASK for clearing interrupts for frame transmission.
- */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[1].cmask);
-
- /* Resetting the ID registers. */
- pch_can_bit_set(&priv->regs->ifregs[1].id2,
- PCH_ID2_DIR | (0x7ff << 2));
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
-
- /* Clearing NewDat, TxRqst & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
- PCH_IF_MCONT_TXRQXT);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
- }
-}
-
-static void pch_can_reset(struct pch_can_priv *priv)
-{
- /* write to sw reset register */
- iowrite32(1, &priv->regs->srst);
- iowrite32(0, &priv->regs->srst);
-}
-
-static void pch_can_error(struct net_device *ndev, u32 status)
-{
- struct sk_buff *skb;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf;
- u32 errc, lec;
- struct net_device_stats *stats = &(priv->ndev->stats);
- enum can_state state = priv->can.state;
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- if (status & PCH_BUS_OFF) {
- pch_can_set_tx_all(priv, 0);
- pch_can_set_rx_all(priv, 0);
- state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
- can_bus_off(ndev);
- }
-
- errc = ioread32(&priv->regs->errc);
- /* Warning interrupt. */
- if (status & PCH_EWARN) {
- state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 96)
- cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & PCH_TEC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- netdev_dbg(ndev,
- "%s -> Error Counter is more than 96.\n", __func__);
- }
- /* Error passive interrupt. */
- if (status & PCH_EPASSIV) {
- priv->can.can_stats.error_passive++;
- state = CAN_STATE_ERROR_PASSIVE;
- cf->can_id |= CAN_ERR_CRTL;
- if (errc & PCH_RP)
- cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & PCH_TEC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- netdev_dbg(ndev,
- "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
- }
-
- lec = status & PCH_LEC_ALL;
- switch (lec) {
- case PCH_STUF_ERR:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_FORM_ERR:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_ACK_ERR:
- cf->can_id |= CAN_ERR_ACK;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_BIT1_ERR:
- case PCH_BIT0_ERR:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_CRC_ERR:
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_LEC_ALL: /* Written by CPU. No error status */
- break;
- }
-
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
- priv->can.state = state;
- netif_receive_skb(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-}
-
-static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (!pch_can_int_pending(priv))
- return IRQ_NONE;
-
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
-}
-
-static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
-{
- if (obj_id < PCH_FIFO_THRESH) {
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
- PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_INTPND);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
- } else if (obj_id > PCH_FIFO_THRESH) {
- pch_can_int_clr(priv, obj_id);
- } else if (obj_id == PCH_FIFO_THRESH) {
- int cnt;
- for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
- pch_can_int_clr(priv, cnt + 1);
- }
-}
-
-static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- struct sk_buff *skb;
- struct can_frame *cf;
-
- netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_MSGLOST);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- netif_receive_skb(skb);
-}
-
-static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
-{
- u32 reg;
- canid_t id;
- int rcv_pkts = 0;
- struct sk_buff *skb;
- struct can_frame *cf;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- int i;
- u32 id2;
- u16 data_reg;
-
- do {
- /* Reading the message object from the Message RAM */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
-
- /* Reading the MCONT register. */
- reg = ioread32(&priv->regs->ifregs[0].mcont);
-
- if (reg & PCH_IF_MCONT_EOB)
- break;
-
- /* If MsgLost bit set. */
- if (reg & PCH_IF_MCONT_MSGLOST) {
- pch_can_rx_msg_lost(ndev, obj_num);
- rcv_pkts++;
- quota--;
- obj_num++;
- continue;
- } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
- obj_num++;
- continue;
- }
-
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- netdev_err(ndev, "alloc_can_skb Failed\n");
- return rcv_pkts;
- }
-
- /* Get Received data */
- id2 = ioread32(&priv->regs->ifregs[0].id2);
- if (id2 & PCH_ID2_XTD) {
- id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
- id |= (((id2) & 0x1fff) << 16);
- cf->can_id = id | CAN_EFF_FLAG;
- } else {
- id = (id2 >> 2) & CAN_SFF_MASK;
- cf->can_id = id;
- }
-
- if (id2 & PCH_ID2_DIR)
- cf->can_id |= CAN_RTR_FLAG;
-
- cf->len = can_cc_dlc2len((ioread32(&priv->regs->
- ifregs[0].mcont)) & 0xF);
-
- for (i = 0; i < cf->len; i += 2) {
- data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
- cf->data[i] = data_reg;
- cf->data[i + 1] = data_reg >> 8;
- }
-
- netif_receive_skb(skb);
- rcv_pkts++;
- stats->rx_packets++;
- quota--;
- stats->rx_bytes += cf->len;
-
- pch_fifo_thresh(priv, obj_num);
- obj_num++;
- } while (quota > 0);
-
- return rcv_pkts;
-}
-
-static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- u32 dlc;
-
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
- iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
- &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
- dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) &
- PCH_IF_MCONT_DLC);
- stats->tx_bytes += dlc;
- stats->tx_packets++;
- if (int_stat == PCH_TX_OBJ_END)
- netif_wake_queue(ndev);
-}
-
-static int pch_can_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct pch_can_priv *priv = netdev_priv(ndev);
- u32 int_stat;
- u32 reg_stat;
- int quota_save = quota;
-
- int_stat = pch_can_int_pending(priv);
- if (!int_stat)
- goto end;
-
- if (int_stat == PCH_STATUS_INT) {
- reg_stat = ioread32(&priv->regs->stat);
-
- if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
- ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
- pch_can_error(ndev, reg_stat);
- quota--;
- }
-
- if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
- pch_can_bit_clear(&priv->regs->stat,
- reg_stat & (PCH_TX_OK | PCH_RX_OK));
-
- int_stat = pch_can_int_pending(priv);
- }
-
- if (quota == 0)
- goto end;
-
- if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
- quota -= pch_can_rx_normal(ndev, int_stat, quota);
- } else if ((int_stat >= PCH_TX_OBJ_START) &&
- (int_stat <= PCH_TX_OBJ_END)) {
- /* Handle transmission interrupt */
- pch_can_tx_complete(ndev, int_stat);
- }
-
-end:
- napi_complete(napi);
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-
- return quota_save - quota;
-}
-
-static int pch_set_bittiming(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- const struct can_bittiming *bt = &priv->can.bittiming;
- u32 canbit;
- u32 bepe;
-
- /* Setting the CCE bit for accessing the Can Timing register. */
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
-
- canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
- canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
- bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
- iowrite32(canbit, &priv->regs->bitt);
- iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
-
- return 0;
-}
-
-static void pch_can_start(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (priv->can.state != CAN_STATE_STOPPED)
- pch_can_reset(priv);
-
- pch_set_bittiming(ndev);
- pch_can_set_optmode(priv);
-
- pch_can_set_tx_all(priv, 1);
- pch_can_set_rx_all(priv, 1);
-
- /* Setting the CAN to run mode. */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return;
-}
-
-static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
-{
- int ret = 0;
-
- switch (mode) {
- case CAN_MODE_START:
- pch_can_start(ndev);
- netif_wake_queue(ndev);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-static int pch_can_open(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- int retval;
-
- /* Registering the interrupt. */
- retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (retval) {
- netdev_err(ndev, "request_irq failed.\n");
- goto req_irq_err;
- }
-
- /* Open common can device */
- retval = open_candev(ndev);
- if (retval) {
- netdev_err(ndev, "open_candev() failed %d\n", retval);
- goto err_open_candev;
- }
-
- pch_can_init(priv);
- pch_can_start(ndev);
- napi_enable(&priv->napi);
- netif_start_queue(ndev);
-
- return 0;
-
-err_open_candev:
- free_irq(priv->dev->irq, ndev);
-req_irq_err:
- pch_can_release(priv);
-
- return retval;
-}
-
-static int pch_close(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- netif_stop_queue(ndev);
- napi_disable(&priv->napi);
- pch_can_release(priv);
- free_irq(priv->dev->irq, ndev);
- close_candev(ndev);
- priv->can.state = CAN_STATE_STOPPED;
- return 0;
-}
-
-static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf = (struct can_frame *)skb->data;
- int tx_obj_no;
- int i;
- u32 id2;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- tx_obj_no = priv->tx_obj;
- if (priv->tx_obj == PCH_TX_OBJ_END) {
- if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
- netif_stop_queue(ndev);
-
- priv->tx_obj = PCH_TX_OBJ_START;
- } else {
- priv->tx_obj++;
- }
-
- /* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
-
- /* If ID extended is set. */
- if (cf->can_id & CAN_EFF_FLAG) {
- iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
- id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
- } else {
- iowrite32(0, &priv->regs->ifregs[1].id1);
- id2 = (cf->can_id & CAN_SFF_MASK) << 2;
- }
-
- id2 |= PCH_ID_MSGVAL;
-
-	/* Set the DIR bit for data frames; remote frames leave it clear. */
- if (!(cf->can_id & CAN_RTR_FLAG))
- id2 |= PCH_ID2_DIR;
-
- iowrite32(id2, &priv->regs->ifregs[1].id2);
-
- /* Copy data to register */
- for (i = 0; i < cf->len; i += 2) {
- iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
- &priv->regs->ifregs[1].data[i / 2]);
- }
-
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
-
- /* Set the size of the data. Update if2_mcont */
- iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
- PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops pch_can_netdev_ops = {
- .ndo_open = pch_can_open,
- .ndo_stop = pch_close,
- .ndo_start_xmit = pch_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static void pch_can_remove(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- unregister_candev(priv->ndev);
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pch_can_reset(priv);
- pci_iounmap(pdev, priv->regs);
- free_candev(priv->ndev);
-}
-
-static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
-{
- /* Clearing the IE, SIE and EIE bits of Can control register. */
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
-
-	/* Set them from the saved interrupt configuration. */
- pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
-}
-
-/* This function retrieves the interrupt-enable state of the CAN device. */
-static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
-{
- /* Obtaining the status of IE, SIE and EIE interrupt bits. */
- return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
-}
-
-static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
- u32 buff_num, enum pch_ifreg dir)
-{
- u32 ie, enable;
-
- if (dir)
- ie = PCH_IF_MCONT_RXIE;
- else
- ie = PCH_IF_MCONT_TXIE;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
- ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
- enable = 1;
- else
- enable = 0;
-
- return enable;
-}
-
-static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
-{
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- if (set)
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-}
-
-static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num)
-{
- u32 link;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-
- if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
- link = 0;
- else
- link = 1;
- return link;
-}
-
-static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
-{
- return (ioread32(&priv->regs->treq1) & 0xffff) |
- (ioread32(&priv->regs->treq2) << 16);
-}
-
-static int __maybe_unused pch_can_suspend(struct device *dev_d)
-{
- int i;
-	u32 buf_stat;	/* Transmit buffer status. */
- int counter = PCH_COUNTER_LIMIT;
-
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- /* Stop the CAN controller */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
-	/* Indicate that we are about to enter, or are in, suspend */
- priv->can.state = CAN_STATE_STOPPED;
-
-	/* Wait for all transmissions to complete. */
- while (counter) {
- buf_stat = pch_can_get_buffer_status(priv);
- if (!buf_stat)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
-		dev_err(dev_d, "%s -> Transmission timed out.\n", __func__);
-
- /* Save interrupt configuration and then disable them */
- priv->int_enables = pch_can_get_int_enables(priv);
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Save Tx buffer enable state */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_TX_IFREG);
-
- /* Disable all Transmit buffers */
- pch_can_set_tx_all(priv, 0);
-
- /* Save Rx buffer enable state */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_RX_IFREG);
- priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
- }
-
- /* Disable all Receive buffers */
- pch_can_set_rx_all(priv, 0);
-
- return 0;
-}
-
-static int __maybe_unused pch_can_resume(struct device *dev_d)
-{
- int i;
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- /* Disabling all interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Setting the CAN device in Stop Mode. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Configuring the transmit and receive buffers. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Restore the CAN state */
- pch_set_bittiming(dev);
-
-	/* Restore listen-only/loopback operating mode */
- pch_can_set_optmode(priv);
-
-	/* Enable the transmit buffers. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
-
-	/* Configure the receive buffers and enable them. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- /* Restore buffer link */
- pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
-
- /* Restore buffer enables */
- pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
- }
-
- /* Enable CAN Interrupts */
- pch_can_set_int_custom(priv);
-
- /* Restore Run Mode */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- return 0;
-}
-
-static int pch_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
-{
- struct pch_can_priv *priv = netdev_priv(dev);
- u32 errc = ioread32(&priv->regs->errc);
-
- bec->txerr = errc & PCH_TEC;
- bec->rxerr = (errc & PCH_REC) >> 8;
-
- return 0;
-}
-
-static int pch_can_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct net_device *ndev;
- struct pch_can_priv *priv;
- int rc;
- void __iomem *addr;
-
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
- goto probe_exit_endev;
- }
-
- rc = pci_request_regions(pdev, KBUILD_MODNAME);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
- goto probe_exit_pcireq;
- }
-
- addr = pci_iomap(pdev, 1, 0);
- if (!addr) {
- rc = -EIO;
- dev_err(&pdev->dev, "Failed pci_iomap\n");
- goto probe_exit_ipmap;
- }
-
- ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
- if (!ndev) {
- rc = -ENOMEM;
- dev_err(&pdev->dev, "Failed alloc_candev\n");
- goto probe_exit_alloc_candev;
- }
-
- priv = netdev_priv(ndev);
- priv->ndev = ndev;
- priv->regs = addr;
- priv->dev = pdev;
- priv->can.bittiming_const = &pch_can_bittiming_const;
- priv->can.do_set_mode = pch_can_do_set_mode;
- priv->can.do_get_berr_counter = pch_can_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_LOOPBACK;
-	priv->tx_obj = PCH_TX_OBJ_START; /* Head of the Tx object queue */
-
- ndev->irq = pdev->irq;
- ndev->flags |= IFF_ECHO;
-
- pci_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->netdev_ops = &pch_can_netdev_ops;
- priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
-
- netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
-
- rc = pci_enable_msi(priv->dev);
- if (rc) {
- netdev_err(ndev, "PCH CAN opened without MSI\n");
- priv->use_msi = 0;
- } else {
- netdev_err(ndev, "PCH CAN opened with MSI\n");
- pci_set_master(pdev);
- priv->use_msi = 1;
- }
-
- rc = register_candev(ndev);
- if (rc) {
- dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
- goto probe_exit_reg_candev;
- }
-
- return 0;
-
-probe_exit_reg_candev:
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- free_candev(ndev);
-probe_exit_alloc_candev:
- pci_iounmap(pdev, addr);
-probe_exit_ipmap:
- pci_release_regions(pdev);
-probe_exit_pcireq:
- pci_disable_device(pdev);
-probe_exit_endev:
- return rc;
-}
-
-static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
- pch_can_suspend,
- pch_can_resume);
-
-static struct pci_driver pch_can_pci_driver = {
- .name = "pch_can",
- .id_table = pch_pci_tbl,
- .probe = pch_can_probe,
- .remove = pch_can_remove,
- .driver.pm = &pch_can_pm_ops,
-};
-
-module_pci_driver(pch_can_pci_driver);
-
-MODULE_DESCRIPTION("Intel EG20T PCH CAN (Controller Area Network) Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.94");
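
The removed pch_can driver splits its 32 hardware message objects into an RX range and a TX range and derives the echo-skb slot from the TX object number (see pch_xmit() and pch_can_tx_complete() above, which both subtract PCH_RX_OBJ_END + 1). A minimal stand-alone sketch of that index mapping, assuming the object layout used by the driver; the main() harness is illustrative only:

#include <assert.h>
#include <stdio.h>

/* Message-object layout assumed from the driver above. */
#define PCH_RX_OBJ_START	1
#define PCH_RX_OBJ_END		26
#define PCH_TX_OBJ_START	(PCH_RX_OBJ_END + 1)
#define PCH_TX_OBJ_END		32

/* Echo-skb slot for a TX message object, as in pch_xmit(). */
static int pch_obj_to_echo_idx(int obj)
{
	return obj - PCH_RX_OBJ_END - 1;
}

int main(void)
{
	for (int obj = PCH_TX_OBJ_START; obj <= PCH_TX_OBJ_END; obj++)
		printf("msg obj %2d -> echo index %d\n",
		       obj, pch_obj_to_echo_idx(obj));
	assert(pch_obj_to_echo_idx(PCH_TX_OBJ_START) == 0);
	return 0;
}
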
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index d08718e98e11..06cb2629f66a 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,12 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
- * Copyright (C) 2016 PEAK System-Technik GmbH
+ * Copyright (C) 2016-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include "peak_canfd_user.h"
@@ -266,10 +267,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
- stats->tx_bytes += cf_len;
+ stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
stats->tx_packets++;
/* restart tx queue (a slot is free) */
@@ -310,12 +310,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cf->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->d, cf->len);
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
@@ -373,7 +374,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -386,7 +387,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -409,8 +410,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return -ENOMEM;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
@@ -432,14 +431,12 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
return -ENOMEM;
}
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
cf->data[6] = priv->bec.txerr;
cf->data[7] = priv->bec.rxerr;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_rx(skb);
return 0;
@@ -627,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
struct peak_canfd_priv *priv = netdev_priv(ndev);
- return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+ return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming);
}
static int peak_canfd_close(struct net_device *ndev)
@@ -654,7 +651,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
int room_left;
u8 len;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
@@ -746,11 +743,50 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int peak_eth_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+
+ return 0;
+}
+
+static int peak_eth_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
+}
+
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
.ndo_start_xmit = peak_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_eth_hwtstamp_set,
+};
+
+static int peak_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops peak_canfd_ethtool_ops = {
+ .get_ts_info = peak_get_ts_info,
};
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
@@ -773,12 +809,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
/* complete now socket-can initialization side */
priv->can.state = CAN_STATE_STOPPED;
priv->can.bittiming_const = &peak_canfd_nominal_const;
- priv->can.data_bittiming_const = &peak_canfd_data_const;
+ priv->can.fd.data_bittiming_const = &peak_canfd_data_const;
priv->can.do_set_mode = peak_canfd_set_mode;
priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
priv->can.do_set_bittiming = peak_canfd_set_bittiming;
- priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES |
@@ -793,6 +829,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
ndev->flags |= IFF_ECHO;
ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->ethtool_ops = &peak_canfd_ethtool_ops;
ndev->dev_id = index;
return ndev;
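
The ndo_hwtstamp_get/set handlers added above accept exactly one configuration: no TX timestamps, hardware RX timestamps for all frames. A user-space sketch of what that contract looks like through SIOCSHWTSTAMP; "can0" is a placeholder interface name and the program is illustrative only:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	/* The only configuration the driver above accepts. */
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	/* Anything other than RX HWTSTAMP_FILTER_ALL returns -ERANGE. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	close(fd);
	return 0;
}
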
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index a72719dc3b74..60c6542028cf 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PEAK_CANFD_USER_H
#define PEAK_CANFD_USER_H
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 1df3c4b54f03..93558e33bc02 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -19,7 +19,7 @@
#include "peak_canfd_user.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 8999ec9455ec..fc3df328e877 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -5,17 +5,20 @@
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
@@ -92,9 +95,7 @@ struct rcar_can_priv {
struct net_device *ndev;
struct napi_struct napi;
struct rcar_can_regs __iomem *regs;
- struct clk *clk;
struct clk *can_clk;
- u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
u32 tx_head;
u32 tx_tail;
u8 clock_select;
@@ -114,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = {
};
/* Control Register bits */
-#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
-#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
- /* at bus-off entry */
-#define RCAR_CAN_CTLR_SLPM (1 << 10)
-#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
-#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
-#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
-#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
-#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
-#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
-#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
-#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */
+#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */
+#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */
+#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */
+#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */
+#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */
+#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */
/* Status Register bits */
-#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */
/* FIFO Received ID Compare Registers 0 and 1 bits */
-#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
-#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */
/* Receive FIFO Control Register bits */
-#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
-#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */
/* Transmit FIFO Control Register bits */
-#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
- /* Number Status Bits */
-#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
- /* Message Number Status Bits */
-#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
-
-#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
- /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
#define RCAR_CAN_N_RX_MKREGS2 8
/* Bit Configuration Register settings */
-#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
-#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
-#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
-#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20)
+#define RCAR_CAN_BCR_BRP GENMASK(17, 8)
+#define RCAR_CAN_BCR_SJW GENMASK(5, 4)
+#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0)
/* Mailbox and Mask Registers bits */
-#define RCAR_CAN_IDE (1 << 31)
-#define RCAR_CAN_RTR (1 << 30)
-#define RCAR_CAN_SID_SHIFT 18
+#define RCAR_CAN_IDE BIT(31) /* ID Extension */
+#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */
+#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */
+#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */
/* Mailbox Interrupt Enable Register 1 bits */
-#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
-#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */
/* Interrupt Enable Register bits */
-#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
-#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
- /* Enable Bit */
-#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
- /* Enable Bit */
+#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
/* Interrupt Status Register bits */
-#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
-#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
- /* Status Bit */
-#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
- /* Status Bit */
+#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
/* Error Interrupt Enable Register bits */
-#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
-#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
- /* Interrupt Enable */
-#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
-#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
-#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
-#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
-#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
-#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE BIT(6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */
/* Error Interrupt Factor Judge Register bits */
-#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
-#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
- /* Detect Flag */
-#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
-#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
-#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
-#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
-#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
-#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */
/* Error Code Store Register bits */
-#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
-#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
-#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
-#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
-#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
-#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
-#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
-#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */
#define RCAR_CAN_NAPI_WEIGHT 4
#define MAX_STR_READS 0x100
@@ -223,7 +226,6 @@ static void tx_failure_cleanup(struct net_device *ndev)
static void rcar_can_error(struct net_device *ndev)
{
struct rcar_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 eifr, txerr = 0, rxerr = 0;
@@ -235,11 +237,8 @@ static void rcar_can_error(struct net_device *ndev)
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -253,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT0;
}
if (ecsr & RCAR_CAN_ECSR_BE1F) {
netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT1;
}
if (ecsr & RCAR_CAN_ECSR_CEF) {
netdev_dbg(priv->ndev, "CRC Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
@@ -290,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_FEF) {
netdev_dbg(priv->ndev, "Form Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_FORM;
}
if (ecsr & RCAR_CAN_ECSR_SEF) {
netdev_dbg(priv->ndev, "Stuff Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_STUFF;
}
@@ -305,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.can_stats.bus_error++;
ndev->stats.rx_errors += rx_errors;
ndev->stats.tx_errors += tx_errors;
- writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
}
if (eifr & RCAR_CAN_EIFR_EWIF) {
netdev_dbg(priv->ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -322,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -334,17 +333,21 @@ static void rcar_can_error(struct net_device *ndev)
writeb(priv->ier, &priv->regs->ier);
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -355,18 +358,15 @@ static void rcar_can_error(struct net_device *ndev)
"Overload Frame Transmission error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
}
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
static void rcar_can_tx_done(struct net_device *ndev)
@@ -376,24 +376,23 @@ static void rcar_can_tx_done(struct net_device *ndev)
u8 isr;
while (1) {
- u8 unsent = readb(&priv->regs->tfcr);
+ u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST,
+ readb(&priv->regs->tfcr));
- unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
- RCAR_CAN_TFCR_TFUST_SHIFT;
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
- RCAR_CAN_FIFO_DEPTH];
- priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
+ stats->tx_bytes +=
+ can_get_echo_skb(ndev,
+ priv->tx_tail % RCAR_CAN_FIFO_DEPTH,
+ NULL);
+
priv->tx_tail++;
netif_wake_queue(ndev);
}
/* Clear interrupt */
isr = readb(&priv->regs->isr);
writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
@@ -424,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_can_set_bittiming(struct net_device *dev)
+static void rcar_can_set_bittiming(struct net_device *ndev)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
u32 bcr;
- bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
- RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
- RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1);
/* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
* All the registers are big-endian but they get byte-swapped on 32-bit
* read/write (but not on 8-bit, contrary to the manuals)...
@@ -456,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev)
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
/* Go to reset mode */
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
break;
}
rcar_can_set_bittiming(ndev);
- ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
- ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
- /* at bus-off */
+ /* Select mixed ID mode */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED);
+ /* Entry to halt mode automatically at bus-off */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT);
ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
writew(ctlr, &priv->regs->ctlr);
@@ -495,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Go to operation mode */
- writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER);
+ writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
break;
@@ -510,32 +513,30 @@ static int rcar_can_open(struct net_device *ndev)
struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err) {
- netdev_err(ndev,
- "failed to enable peripheral clock, error %d\n",
- err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
goto out;
}
err = clk_prepare_enable(priv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n",
- err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n",
+ ERR_PTR(err));
+ goto out_rpm;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err));
goto out_can_clock;
}
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "request_irq(%d) failed, error %d\n",
- ndev->irq, err);
+ netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq,
+ ERR_PTR(err));
goto out_close;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
rcar_can_start(ndev);
netif_start_queue(ndev);
return 0;
@@ -544,8 +545,8 @@ out_close:
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(priv->can_clk);
-out_clock:
- clk_disable_unprepare(priv->clk);
+out_rpm:
+ pm_runtime_put(ndev->dev.parent);
out:
return err;
}
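
The clk_prepare_enable() to runtime-PM conversion above follows a common pattern: pm_runtime_resume_and_get() drops its device reference itself on failure, so only the success path needs a matching pm_runtime_put() in the unwind chain. A condensed sketch of the resulting open() error handling; the kernel APIs are real, the stub function around them is illustrative:

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

static int example_open(struct net_device *ndev)
{
	int err;

	err = pm_runtime_resume_and_get(ndev->dev.parent);
	if (err)
		return err;	/* reference already dropped on failure */

	err = open_candev(ndev);
	if (err)
		goto out_rpm;

	return 0;

out_rpm:
	pm_runtime_put(ndev->dev.parent);
	return err;
}
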
@@ -558,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev)
/* Go to (force) reset mode */
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
@@ -583,9 +584,8 @@ static int rcar_can_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi);
clk_disable_unprepare(priv->can_clk);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(ndev->dev.parent);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -596,13 +596,14 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
u32 data, i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
- data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) |
+ RCAR_CAN_IDE;
else /* Standard frame format */
- data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+ data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK);
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
@@ -616,7 +617,6 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
- priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
@@ -635,7 +635,10 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_open = rcar_can_open,
.ndo_stop = rcar_can_close,
.ndo_start_xmit = rcar_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops rcar_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
@@ -654,9 +657,9 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
if (data & RCAR_CAN_IDE)
- cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG;
else
- cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(RCAR_CAN_SID, data);
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
cf->len = can_cc_dlc2len(dlc);
@@ -666,12 +669,11 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
for (dlc = 0; dlc < cf->len; dlc++)
cf->data[dlc] =
readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
- }
-
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+
netif_receive_skb(skb);
}
@@ -719,18 +721,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_can_get_berr_counter(const struct net_device *dev,
+static int rcar_can_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
return err;
+
bec->txerr = readb(&priv->regs->tecr);
bec->rxerr = readb(&priv->regs->recr);
- clk_disable_unprepare(priv->clk);
+
+ pm_runtime_put(ndev->dev.parent);
+
return 0;
}
@@ -742,6 +747,7 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_can_priv *priv;
struct net_device *ndev;
void __iomem *addr;
@@ -749,7 +755,7 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ of_property_read_u32(dev->of_node, "renesas,can-clock-select",
&clock_select);
irq = platform_get_irq(pdev, 0);
@@ -766,34 +772,26 @@ static int rcar_can_probe(struct platform_device *pdev)
ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
err = -ENOMEM;
goto fail;
}
priv = netdev_priv(ndev);
- priv->clk = devm_clk_get(&pdev->dev, "clkp1");
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_clk;
- }
-
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
- dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ dev_err(dev, "invalid CAN clock selected\n");
goto fail_clk;
}
- priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ priv->can_clk = devm_clk_get(dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk);
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->ethtool_ops = &rcar_can_ethtool_ops;
ndev->irq = irq;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
@@ -805,23 +803,24 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
+
+ netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
+
+ pm_runtime_enable(dev);
- netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
- RCAR_CAN_NAPI_WEIGHT);
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev, "register_candev() failed, error %d\n",
- err);
- goto fail_candev;
+ dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err));
+ goto fail_rpm;
}
- devm_can_led_init(ndev);
-
- dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
+ dev_info(dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
-fail_candev:
+fail_rpm:
+ pm_runtime_disable(dev);
netif_napi_del(&priv->napi);
fail_clk:
free_candev(ndev);
@@ -829,18 +828,18 @@ fail:
return err;
}
-static int rcar_can_remove(struct platform_device *pdev)
+static void rcar_can_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct rcar_can_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);
- return 0;
}
-static int __maybe_unused rcar_can_suspend(struct device *dev)
+static int rcar_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
@@ -853,38 +852,32 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT);
writew(ctlr, &priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ pm_runtime_put(dev);
return 0;
}
-static int __maybe_unused rcar_can_resume(struct device *dev)
+static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct rcar_can_priv *priv = netdev_priv(ndev);
- u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
- err = clk_enable(priv->clk);
+ err = pm_runtime_resume_and_get(dev);
if (err) {
- netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
return err;
}
- ctlr = readw(&priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_SLPM;
- writew(ctlr, &priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_CANM;
- writew(ctlr, &priv->regs->ctlr);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
@@ -892,7 +885,8 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend,
+ rcar_can_resume);
static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7778" },
@@ -910,7 +904,7 @@ static struct platform_driver rcar_can_driver = {
.driver = {
.name = RCAR_CAN_DRV_NAME,
.of_match_table = of_match_ptr(rcar_can_of_table),
- .pm = &rcar_can_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_can_pm_ops),
},
.probe = rcar_can_probe,
.remove = rcar_can_remove,
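
The rcar_can changes above mechanically replace open-coded shift/mask macros with BIT()/GENMASK() and FIELD_PREP()/FIELD_GET(). A stand-alone harness showing the old and new encodings agree for one representative field; the helper macros re-implement the kernel's for a 32-bit word (using the GCC/Clang ctz builtin) and are illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)		(((r) & (m)) >> __builtin_ctz(m))

#define RCAR_CAN_BCR_TSEG1	GENMASK(23, 20)

int main(void)
{
	uint32_t tseg1 = 7;
	uint32_t oldv = (tseg1 & 0x0f) << 20;	/* old RCAR_CAN_BCR_TSEG1(x) */
	uint32_t newv = FIELD_PREP(RCAR_CAN_BCR_TSEG1, tseg1);

	assert(oldv == newv);
	assert(FIELD_GET(RCAR_CAN_BCR_TSEG1, newv) == tseg1);
	printf("0x%08x == 0x%08x\n", oldv, newv);
	return 0;
}
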
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index ff9d0f5ae0dd..7895e1fdea1c 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,31 +21,27 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/can/led.h>
-#include <linux/can/dev.h>
-#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#define RCANFD_DRV_NAME "rcar_canfd"
-enum rcanfd_chip_id {
- RENESAS_RCAR_GEN3 = 0,
- RENESAS_RZG2L,
-};
-
/* Global register bits */
/* RSCFDnCFDGRMCFG */
@@ -79,27 +75,24 @@ enum rcanfd_chip_id {
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF1 BIT(17)
-#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_EEF GENMASK(23, 16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
-#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
- (gpriv->fdmode ?\
- RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \
+ RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \
+})
/* AFL Rx rules registers */
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
-
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn)
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -110,16 +103,13 @@ enum rcanfd_chip_id {
/* Channel register bits */
/* RSCFDnCmCFG - Classical CAN only */
-#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
-#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_CFG_SJW GENMASK(25, 24)
+#define RCANFD_CFG_TSEG2 GENMASK(22, 20)
+#define RCANFD_CFG_TSEG1 GENMASK(19, 16)
+#define RCANFD_CFG_BRP GENMASK(9, 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
-#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_NCFG_NBRP GENMASK(9, 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR_CTME BIT(24)
@@ -179,15 +169,24 @@ enum rcanfd_chip_id {
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+#define RCANFD_DCFG_DBRP GENMASK(7, 0)
/* RSCFDnCFDCmFDCFG */
+#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
+#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
+#define RCANFD_FDCFG_TDCO GENMASK(23, 16)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
-#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO BIT(9)
+#define RCANFD_FDSTS_EOCO BIT(8)
+#define RCANFD_FDSTS_TDCVF BIT(7)
+#define RCANFD_FDSTS_TDCR GENMASK(7, 0)
/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM BIT(12)
@@ -208,8 +207,6 @@ enum rcanfd_chip_id {
/* RSCFDnCFDRFPTRx */
#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
-#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
-#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
/* RSCFDnCFDRFFDSTSx */
#define RCANFD_RFFDSTS_RFFDF BIT(2)
@@ -219,10 +216,14 @@ enum rcanfd_chip_id {
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, cftml) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \
+})
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm)
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc)
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -237,12 +238,9 @@ enum rcanfd_chip_id {
/* RSCFDnCFDCFIDk */
#define RCANFD_CFID_CFIDE BIT(31)
#define RCANFD_CFID_CFRTR BIT(30)
-#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
/* RSCFDnCFDCFPTRk */
#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
-#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
-#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCFFDCSTSk */
#define RCANFD_CFFDCSTS_CFFDF BIT(2)
@@ -282,87 +280,32 @@ enum rcanfd_chip_id {
#define RCANFD_GTSC (0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0 (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40)
/* Common FIFO Control registers */
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+ ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+ ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
- (0x04 * (idx)))
-
-/* RSCFDnCFDFESTS / RSCFDnFESTS */
-#define RCANFD_FESTS (0x0238)
-/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
-#define RCANFD_FFSTS (0x023c)
-/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
-#define RCANFD_FMSTS (0x0240)
-/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
-#define RCANFD_RFISTS (0x0244)
-/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
-#define RCANFD_CFRISTS (0x0248)
-/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
-#define RCANFD_CFTISTS (0x024c)
-
-/* RSCFDnCFDTMCp / RSCFDnTMCp */
-#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
-/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
-#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
-
-/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
-#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
-/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
-#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
-/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
-#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
-/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
-#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
-/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
-#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
-
-/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
-#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
-/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
-#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
-/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
-#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
-
-/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
-#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
-/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
-#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
-/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
-#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
-
-/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
-#define RCANFD_GTINTSTS0 (0x0460)
-/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
-#define RCANFD_GTINTSTS1 (0x0464)
-/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
-#define RCANFD_GTSTCFG (0x0468)
-/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
-#define RCANFD_GTSTCTR (0x046c)
-/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
-#define RCANFD_GLOCKK (0x047c)
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+ ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
+
/* RSCFDnCFDGRMCFG */
#define RCANFD_GRMCFG (0x04fc)
@@ -380,98 +323,72 @@ enum rcanfd_chip_id {
/* RSCFDnGAFLXXXj offset */
#define RCANFD_C_GAFL_OFFSET (0x0500)
-/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
-#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
-#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
-#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
-#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
-
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET (0x0e00)
-#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
- (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
- (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
#define RCANFD_C_CFOFFSET (0x0e80)
-#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
- (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
- (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
- (0x30 * (ch)) + (0x10 * (idx)) + \
- (0x04 * (df)))
-
-/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
-#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
-#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
-#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
-#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
-
-/* RSCFDnTHLACCm */
-#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
-/* RSCFDnRPGACCr */
-#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+
+#define RCANFD_C_CFID(ch, idx) \
+ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx) \
+ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
+
+/* R-Car Gen4 Classical and CAN FD mode specific register map */
+#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */
+struct rcar_canfd_f_c {
+ u32 dcfg;
+ u32 cfdcfg;
+ u32 cfdctr;
+ u32 cfdsts;
+ u32 cfdcrc;
+ u32 pad[3];
+};
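
/*
 * Editor's sketch (not part of the patch): gpriv->fcbase[m] indexes this
 * struct in place of the old 0x20-stride RCANFD_F_*(m) macros, so the
 * layout must stay exactly eight 32-bit words. A compile-time guard such
 * as the following (placement assumed) would catch stray padding:
 */
static_assert(sizeof(struct rcar_canfd_f_c) == 0x20,
	      "rcar_canfd_f_c must match the 0x20 per-channel stride");
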
/* RSCFDnCFDGAFLXXXj offset */
#define RCANFD_F_GAFL_OFFSET (0x1000)
-/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
-#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
-#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
-#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
-#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
-
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET (0x3000)
-#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
- (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
- (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
- (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
+#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET (0x3400)
-#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
- (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
- (0x180 * (ch)) + (0x80 * (idx)) + \
- (0x04 * (df)))
-
-/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
-#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
-#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
-#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
-#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
-
-/* RSCFDnCFDTHLACCm */
-#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
-/* RSCFDnCFDRPGACCr */
-#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
+#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
/* Constants */
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
-#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
-#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
+#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
@@ -487,13 +404,47 @@ enum rcanfd_chip_id {
*/
#define RCANFD_CFFIFO_IDX 0
-/* fCAN clock select register settings */
-enum rcar_canfd_fcanclk {
- RCANFD_CANFDCLK = 0, /* CANFD clock */
- RCANFD_EXTCLK, /* Externally input clock */
+struct rcar_canfd_global;
+
+struct rcar_canfd_regs {
+ u16 rfcc; /* RX FIFO Configuration/Control Register */
+ u16 cfcc; /* Common FIFO Configuration/Control Register */
+ u16 cfsts; /* Common FIFO Status Register */
+ u16 cfpctr; /* Common FIFO Pointer Control Register */
+ u16 coffset; /* Per-channel configuration (DCFG) register block offset */
+ u16 rfoffset; /* Receive FIFO buffer access ID register */
+ u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
};
-struct rcar_canfd_global;
+struct rcar_canfd_shift_data {
+ u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */
+ u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */
+ u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */
+ u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */
+ u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */
+ u8 cftml; /* Common FIFO TX Message Buffer Link */
+ u8 cfm; /* Common FIFO Mode */
+ u8 cfdc; /* Common FIFO Depth Configuration */
+};
+
+struct rcar_canfd_hw_info {
+ const struct can_bittiming_const *nom_bittiming;
+ const struct can_bittiming_const *data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct rcar_canfd_regs *regs;
+ const struct rcar_canfd_shift_data *sh;
+ u8 rnc_field_width;
+ u8 max_aflpn;
+ u8 max_cftml;
+ u8 max_channels;
+ u8 postdiv;
+ /* hardware features */
+ unsigned shared_global_irqs:1; /* Has shared global irqs */
+ unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+ unsigned ch_interface_mode:1; /* Has channel interface mode */
+ unsigned shared_can_regs:1; /* Has shared Classical CAN registers */
+ unsigned external_clk:1; /* Has external clock */
+};
/* Channel priv data */
struct rcar_canfd_channel {
@@ -501,8 +452,8 @@ struct rcar_canfd_channel {
struct net_device *ndev;
struct rcar_canfd_global *gpriv; /* Controller reference */
void __iomem *base; /* Register base address */
+ struct phy *transceiver; /* Optional transceiver */
struct napi_struct napi;
- u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
u32 tx_head; /* Incremented on xmit */
u32 tx_tail; /* Incremented on xmit done */
u32 channel; /* Channel number */
@@ -513,19 +464,21 @@ struct rcar_canfd_channel {
struct rcar_canfd_global {
struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
void __iomem *base; /* Register base address */
+ struct rcar_canfd_f_c __iomem *fcbase;
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
- enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
+ struct clk *clk_ram; /* Clock RAM */
unsigned long channels_mask; /* Enabled channels mask */
+ bool extclk; /* External clock selected (else CANFD clock) */
bool fdmode; /* CAN FD or Classical CAN only mode */
struct reset_control *rstc1;
struct reset_control *rstc2;
- enum rcanfd_chip_id chip_id;
+ const struct rcar_canfd_hw_info *info;
};
/* CAN FD mode nominal rate constants */
-static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 128,
@@ -537,8 +490,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* CAN FD mode data rate constants */
-static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 16,
@@ -550,6 +515,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/* Classical CAN mode bitrate constants */
static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.name = RCANFD_DRV_NAME,
@@ -563,6 +540,135 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+/* CAN FD Transmission Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 128,
+ .tdco_min = 1,
+ .tdco_max = 128,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 256,
+ .tdco_min = 1,
+ .tdco_max = 256,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct rcar_canfd_regs rcar_gen3_regs = {
+ .rfcc = 0x00b8,
+ .cfcc = 0x0118,
+ .cfsts = 0x0178,
+ .cfpctr = 0x01d8,
+ .coffset = 0x0500,
+ .rfoffset = 0x3000,
+ .cfoffset = 0x3400,
+};
+
+static const struct rcar_canfd_regs rcar_gen4_regs = {
+ .rfcc = 0x00c0,
+ .cfcc = 0x0120,
+ .cfsts = 0x01e0,
+ .cfpctr = 0x0240,
+ .coffset = 0x1400,
+ .rfoffset = 0x6000,
+ .cfoffset = 0x6400,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen3_shift_data = {
+ .ntseg2 = 24,
+ .ntseg1 = 16,
+ .nsjw = 11,
+ .dtseg2 = 20,
+ .dtseg1 = 16,
+ .cftml = 20,
+ .cfm = 16,
+ .cfdc = 8,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
+ .ntseg2 = 25,
+ .ntseg1 = 17,
+ .nsjw = 10,
+ .dtseg2 = 16,
+ .dtseg1 = 8,
+ .cftml = 16,
+ .cfm = 8,
+ .cfdc = 21,
+};
+
+static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
+ .max_channels = 2,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
+};
+
+static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 127,
+ .max_cftml = 31,
+ .max_channels = 8,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
+};
+
+static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
+ .max_channels = 2,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
+};
+
+static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 63,
+ .max_cftml = 31,
+ .max_channels = 6,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 0,
+};
+
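/*
 * Editor's note: probe() selects one of the rcar_canfd_hw_info instances
 * above through of_device_get_match_data(). The match table itself lives
 * outside this hunk; a hypothetical sketch of the mapping (compatible
 * strings assumed, not quoted from the patch):
 */
static const struct of_device_id rcar_canfd_of_table_sketch[] = {
	{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
	{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
	{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
	{ .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
	{ /* sentinel */ }
};
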
/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
@@ -575,50 +681,65 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
{
- return readl(base + (offset));
+ return readl(base + offset);
}
static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
{
- writel(val, base + (offset));
+ writel(val, base + offset);
}
static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, val, base + (reg));
+ rcar_canfd_update(val, val, base + reg);
}
static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, 0, base + (reg));
+ rcar_canfd_update(val, 0, base + reg);
}
static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
u32 mask, u32 val)
{
- rcar_canfd_update(mask, val, base + (reg));
+ rcar_canfd_update(mask, val, base + reg);
+}
+
+static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, val, addr);
+}
+
+static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, 0, addr);
+}
+
+static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, addr);
}
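
/*
 * Editor's note: the new *_reg helpers mirror the offset-based ones above
 * but take a fully resolved __iomem address, so callers can poke the
 * struct-mapped per-channel registers directly, e.g.
 * rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, ...).
 */
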
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- *((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+ data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ const u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
- *((u32 *)cf->data + i));
+ rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
}
static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -629,8 +750,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
+{
+ unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
+ unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
+ unsigned int w = ch / rnc_stride;
+ u32 rnc = num_rules << shift;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
+}
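
/*
 * Editor's worked example (illustrative only): with the Gen4 16-bit RNC
 * fields, rnc_stride = 32 / 16 = 2 channels per GAFLCFG word, so channel 3
 * uses word w = 3 / 2 = 1 and shift = 32 - (3 % 2 + 1) * 16 = 0, i.e. the
 * rule count lands in the low half of GAFLCFG(1). On Gen3 (8-bit fields,
 * stride 4) the same channel gives w = 0 and shift = 0, which lines up
 * with the old RCANFD_GAFLCFG_SETRNC() packing.
 */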
+
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
+ struct device *dev = &gpriv->pdev->dev;
u32 sts, ch;
int err;
@@ -640,7 +773,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ dev_dbg(dev, "global raminit failed\n");
return err;
}
@@ -653,7 +786,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
(sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ dev_dbg(dev, "global reset failed\n");
return err;
}
@@ -661,15 +794,17 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
+ if (!gpriv->info->ch_interface_mode) {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
/* Transition all Channels to reset mode */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
@@ -682,11 +817,27 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
(sts & RCANFD_CSTS_CRSTSTS),
2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev,
- "channel %u reset failed\n", ch);
+ dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
+
+ /* Set the controller into appropriate mode */
+ if (gpriv->info->ch_interface_mode) {
+ /* Do not set CLOE and FDOE simultaneously */
+ if (!gpriv->fdmode) {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ } else {
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_FDOE);
+ rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
+ RCANFD_GEN4_FDCFG_CLOE);
+ }
+ }
}
+
return 0;
}
@@ -704,13 +855,13 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
cfg |= RCANFD_GCFG_CMPOC;
/* Set External Clock if selected */
- if (gpriv->fcan != RCANFD_CANFDCLK)
+ if (gpriv->extclk)
cfg |= RCANFD_GCFG_DCS;
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
/* Channel configuration settings */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
RCANFD_CCTR_ERRD);
rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -720,43 +871,36 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
- start = RCANFD_GAFLCFG_GETRNC(0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
- (RCANFD_GAFLECTR_AFLPN(page) |
+ (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
- RCANFD_GAFLCFG_SETRNC(ch, num_rules));
- if (gpriv->fdmode)
+ rcar_canfd_set_rnc(gpriv, ch, num_rules);
+ if (gpriv->info->shared_can_regs)
+ offset = RCANFD_GEN4_GAFL_OFFSET;
+ else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
- RCANFD_GAFLP1_GAFLFDP(ridx));
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
rcar_canfd_clear_bit(gpriv->base,
@@ -780,7 +924,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
- rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
}
static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -802,15 +946,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
else
cfpls = 0; /* b000 - Max 8 bytes payload */
- cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
- RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
- rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
if (gpriv->fdmode)
/* Clear FD mode specific control/status register */
rcar_canfd_write(gpriv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
}
static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
@@ -881,30 +1025,26 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
- netdev_dbg(ndev, "Ch0: ECC Error flag\n");
- stats->tx_dropped++;
- }
- if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
- netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) {
+ netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
if (gerfl & RCANFD_GERFL_MES) {
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (sts & RCANFD_CFSTS_CFMLT) {
netdev_dbg(ndev, "Tx Message Lost flag\n");
stats->tx_dropped++;
rcar_canfd_write(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFMLT);
}
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (sts & RCANFD_RFSTS_RFMLT) {
netdev_dbg(ndev, "Rx Message Lost flag\n");
stats->rx_dropped++;
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFMLT);
}
}
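
/*
 * Editor's note (illustrative): assuming RCANFD_GERFL_EEF is a mask
 * covering one ECC-error flag bit per channel, FIELD_PREP(RCANFD_GERFL_EEF,
 * BIT(ch)) places the channel's bit inside that field, so the old
 * hard-coded EEF0/EEF1 checks collapse into one test that also scales to
 * the eight-channel Gen4 parts.
 */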
@@ -998,7 +1138,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = txerr;
@@ -1008,7 +1148,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error passive interrupt\n");
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
cf->data[6] = txerr;
@@ -1033,14 +1173,13 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
/* Clear channel error interrupts that are handled */
rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
RCANFD_CERFL_ERR(~cerfl));
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
static void rcar_canfd_tx_done(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct net_device_stats *stats = &ndev->stats;
u32 sts;
unsigned long flags;
@@ -1051,14 +1190,12 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_len[sent];
- priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent, NULL);
+ stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
unsent = RCANFD_CFSTS_CFMC(sts);
/* Wake producer only when there is room */
@@ -1074,9 +1211,8 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
} while (1);
/* Clear interrupt */
- rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch)
@@ -1096,7 +1232,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels)
rcar_canfd_handle_global_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1106,15 +1242,17 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u32 ch)
{
struct rcar_canfd_channel *priv = gpriv->ch[ch];
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- u32 sts;
+ u32 sts, cc;
/* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
+ cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF &&
+ cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
+ RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
__napi_schedule(&priv->napi);
}
@@ -1126,7 +1264,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels)
rcar_canfd_handle_global_receive(gpriv, ch);
return IRQ_HANDLED;
@@ -1140,7 +1278,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_handle_global_err(gpriv, ch);
rcar_canfd_handle_global_receive(gpriv, ch);
}
@@ -1174,8 +1312,6 @@ static void rcar_canfd_state_change(struct net_device *ndev,
rx_state = txerr <= rxerr ? state : 0;
can_change_state(ndev, cf, tx_state, rx_state);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -1188,18 +1324,16 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
/* Handle Tx interrupts */
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (likely(sts & RCANFD_CFSTS_CFTXIF))
rcar_canfd_tx_done(ndev);
}
static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_tx(gpriv, ch);
+ rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
@@ -1227,11 +1361,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c
static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_err(gpriv, ch);
+ rcar_canfd_handle_channel_err(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
@@ -1242,7 +1374,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
u32 ch;
/* Common FIFO is a per channel resource */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_handle_channel_err(gpriv, ch);
rcar_canfd_handle_channel_tx(gpriv, ch);
}
@@ -1250,13 +1382,52 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ const struct rcar_canfd_hw_info *info = gpriv->info;
+ u32 ntseg1, ntseg2, nsjw, nbrp;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1;
+ ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2;
+ nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << info->sh->nsjw;
+ nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp);
+ } else {
+ ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1);
+ ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2);
+ nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw);
+ nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp);
+ }
+
+ return (ntseg1 | ntseg2 | nsjw | nbrp);
+}
+
+static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ u32 dtseg1, dtseg2, dsjw, dbrp;
+
+ dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1;
+ dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2;
+ dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24;
+ dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp);
+
+ return (dtseg1 | dtseg2 | dsjw | dbrp);
+}
+
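/*
 * Editor's worked example (values assumed for illustration): on Gen3
 * (dtseg1 shift 16, dtseg2 shift 20, dsjw fixed at bit 24), a data phase
 * with brp = 0, tseg1 = 14, tseg2 = 4, sjw = 3 packs as
 * (14 << 16) | (4 << 20) | (3 << 24) | 0 = 0x034e0000. The "& (max - 1)"
 * masking relies on the tseg/sjw maxima being powers of two, which holds
 * for every bittiming constant set defined above.
 */
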
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+ const struct can_tdc *tdc = &priv->can.fd.tdc;
+ u32 cfg, tdcmode = 0, tdco = 0;
u16 brp, sjw, tseg1, tseg2;
- u32 cfg;
u32 ch = priv->channel;
/* Nominal bit timing settings */
@@ -1264,43 +1435,39 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp);
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- /* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
-
- /* Data bit timing settings */
- brp = dbt->brp - 1;
- sjw = dbt->sjw - 1;
- tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
- tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
- netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
- } else {
- /* Classical CAN only mode */
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
-
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev,
- "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return;
+
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+ cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp);
+ writel(cfg, &gpriv->fcbase[ch].dcfg);
+
+ /* Transceiver Delay Compensation */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+ /* TDC enabled, measured + offset */
+ tdcmode = RCANFD_FDCFG_TDCE;
+ tdco = tdc->tdco - 1;
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ /* TDC enabled, offset only */
+ tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
}
+
+ rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask,
+ tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
}
static int rcar_canfd_start(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err = -EOPNOTSUPP;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1322,9 +1489,9 @@ static int rcar_canfd_start(struct net_device *ndev)
}
/* Enable Common & Rx FIFO */
- rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -1340,16 +1507,22 @@ static int rcar_canfd_open(struct net_device *ndev)
struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
+ err = phy_power_on(priv->transceiver);
+ if (err) {
+ netdev_err(ndev, "failed to power on PHY: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
/* Peripheral clock is already enabled in probe */
err = clk_prepare_enable(gpriv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n", err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n", ERR_PTR(err));
+ goto out_phy;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed: %pe\n", ERR_PTR(err));
goto out_can_clock;
}
@@ -1358,20 +1531,21 @@ static int rcar_canfd_open(struct net_device *ndev)
if (err)
goto out_close;
netif_start_queue(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
return 0;
out_close:
napi_disable(&priv->napi);
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(gpriv->can_clk);
-out_clock:
+out_phy:
+ phy_power_off(priv->transceiver);
return err;
}
static void rcar_canfd_stop(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1389,9 +1563,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
rcar_canfd_disable_channel_interrupts(priv);
/* Disable Common & Rx FIFO */
- rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
@@ -1405,9 +1579,9 @@ static int rcar_canfd_close(struct net_device *ndev)
netif_stop_queue(ndev);
rcar_canfd_stop(ndev);
napi_disable(&priv->napi);
- clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
+ clk_disable_unprepare(gpriv->can_clk);
+ phy_power_off(priv->transceiver);
return 0;
}
@@ -1415,12 +1589,13 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 sts = 0, id, dlc;
unsigned long flags;
u32 ch = priv->channel;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) {
@@ -1435,11 +1610,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
rcar_canfd_write(priv->base,
- RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
- RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
@@ -1452,10 +1627,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
rcar_canfd_write(priv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
rcar_canfd_put_data(priv, cf,
- RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
} else {
rcar_canfd_write(priv->base,
RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1465,7 +1640,6 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
}
- priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1479,7 +1653,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
* pointer for the Common FIFO
*/
rcar_canfd_write(priv->base,
- RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+ RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1487,27 +1661,30 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
- struct net_device_stats *stats = &priv->ndev->stats;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 sts = 0, id, dlc;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
- dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
- sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
- if (sts & RCANFD_RFFDSTS_RFFDF)
- skb = alloc_canfd_skb(priv->ndev, &cf);
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ sts & RCANFD_RFFDSTS_RFFDF)
+ skb = alloc_canfd_skb(ndev, &cf);
else
- skb = alloc_can_skb(priv->ndev,
- (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
} else {
id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
}
if (!skb) {
@@ -1528,7 +1705,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
- netdev_dbg(priv->ndev, "ESI Error\n");
+ netdev_dbg(ndev, "ESI Error\n");
}
if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1537,12 +1714,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFBRS)
cf->flags |= CANFD_BRS;
- rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
}
} else {
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
+ else if (gpriv->info->shared_can_regs)
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
}
@@ -1550,11 +1729,10 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
/* Write 0xff to RFPC to increment the CPU-side
* pointer of the Rx FIFO
*/
- rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
-
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
}
@@ -1563,13 +1741,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
{
struct rcar_canfd_channel *priv =
container_of(napi, struct rcar_canfd_channel, napi);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int num_pkts;
u32 sts;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
for (num_pkts = 0; num_pkts < quota; num_pkts++) {
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
/* Check FIFO empty condition */
if (sts & RCANFD_RFSTS_RFEMP)
break;
@@ -1578,7 +1757,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* Clear interrupt bit */
if (sts & RCANFD_RFSTS_RFIF)
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFIF);
}
@@ -1586,13 +1765,36 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
if (num_pkts < quota) {
if (napi_complete_done(napi, num_pkts)) {
/* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
}
}
return num_pkts;
}
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+ unsigned int ch)
+{
+ u32 sts = readl(&gpriv->fcbase[ch].cfdsts);
+ u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+ return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ u32 tdco = priv->can.fd.tdc.tdco;
+ u32 tdcr;
+
+ /* Transceiver Delay Compensation Result */
+ tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
+ *tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+ return 0;
+}
+
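/*
 * Editor's worked example (illustrative): if CFDSTS.TDCR reads back 0x20
 * and tdco = 16 is configured, the helpers above report
 * (0x20 & (tdcv_max - 1)) + 1 - 16 = 17 as the measured transceiver delay;
 * raw results smaller than tdco clamp to 0.
 */
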
static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
int err;
@@ -1609,10 +1811,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
u32 val, ch = priv->channel;
/* Peripheral clock is already enabled in probe */
@@ -1626,117 +1828,134 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_open = rcar_canfd_open,
.ndo_stop = rcar_canfd_close,
.ndo_start_xmit = rcar_canfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops rcar_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
- u32 fcan_freq)
+ u32 fcan_freq, struct phy *transceiver)
{
+ const struct rcar_canfd_hw_info *info = gpriv->info;
struct platform_device *pdev = gpriv->pdev;
+ struct device *dev = &pdev->dev;
struct rcar_canfd_channel *priv;
struct net_device *ndev;
int err = -ENODEV;
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
- if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
- err = -ENOMEM;
- goto fail;
- }
+ if (!ndev)
+ return -ENOMEM;
+
priv = netdev_priv(ndev);
ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->ethtool_ops = &rcar_canfd_ethtool_ops;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
priv->base = gpriv->base;
+ priv->transceiver = transceiver;
priv->channel = ch;
+ priv->gpriv = gpriv;
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
priv->can.clock.freq = fcan_freq;
- dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq);
- if (gpriv->chip_id == RENESAS_RZG2L) {
+ if (info->multi_channel_irqs) {
char *irq_name;
+ char name[10];
int err_irq;
int tx_irq;
- err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ scnprintf(name, sizeof(name), "ch%u_err", ch);
+ err_irq = platform_get_irq_byname(pdev, name);
if (err_irq < 0) {
err = err_irq;
goto fail;
}
- tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ scnprintf(name, sizeof(name), "ch%u_trx", ch);
+ tx_irq = platform_get_irq_byname(pdev, name);
if (tx_irq < 0) {
err = tx_irq;
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_err", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, err_irq,
+ err = devm_request_irq(dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
- err_irq, err);
+ dev_err(dev, "devm_request_irq CH Err %d failed: %pe\n",
+ err_irq, ERR_PTR(err));
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_trx", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, tx_irq,
+ err = devm_request_irq(dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
- tx_irq, err);
+ dev_err(dev, "devm_request_irq Tx %d failed: %pe\n",
+ tx_irq, ERR_PTR(err));
goto fail;
}
}
if (gpriv->fdmode) {
- priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
- priv->can.data_bittiming_const =
- &rcar_canfd_data_bittiming_const;
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+ priv->can.fd.tdc_const = gpriv->info->tdc_const;
/* Controller starts in CAN FD only mode */
- can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (err)
+ goto fail;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
- priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ if (gpriv->info->shared_can_regs)
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ else
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
}
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- priv->gpriv = gpriv;
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
- netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
- RCANFD_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev,
- "register_candev() failed, error %d\n", err);
+ dev_err(dev, "register_candev() failed: %pe\n", ERR_PTR(err));
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
- dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ dev_info(dev, "device registered (channel %u)\n", priv->channel);
return 0;
fail_candev:
netif_napi_del(&priv->napi);
- free_candev(ndev);
fail:
+ free_candev(ndev);
return err;
}
@@ -1751,32 +1970,141 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
}
}
+static int rcar_canfd_global_init(struct rcar_canfd_global *gpriv)
+{
+ struct device *dev = &gpriv->pdev->dev;
+ u32 rule_entry = 0;
+ u32 ch, sts;
+ int err;
+
+ err = reset_control_reset(gpriv->rstc1);
+ if (err)
+ return err;
+
+ err = reset_control_reset(gpriv->rstc2);
+ if (err)
+ goto fail_reset1;
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(dev, "failed to enable peripheral clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_reset2;
+ }
+
+ /* Enable RAM clock */
+ err = clk_prepare_enable(gpriv->clk_ram);
+ if (err) {
+ dev_err(dev, "failed to enable RAM clock: %pe\n",
+ ERR_PTR(err));
+ goto fail_clk;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
+ goto fail_ram_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ return 0;
+
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_ram_clk:
+ clk_disable_unprepare(gpriv->clk_ram);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_reset2:
+ reset_control_assert(gpriv->rstc2);
+fail_reset1:
+ reset_control_assert(gpriv->rstc1);
+ return err;
+}
+
+static void rcar_canfd_global_deinit(struct rcar_canfd_global *gpriv, bool full)
+{
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ if (full) {
+ rcar_canfd_reset_controller(gpriv);
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ }
+
+ clk_disable_unprepare(gpriv->clk_ram);
+ clk_disable_unprepare(gpriv->clkp);
+ reset_control_assert(gpriv->rstc2);
+ reset_control_assert(gpriv->rstc1);
+}
+
static int rcar_canfd_probe(struct platform_device *pdev)
{
+ struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, };
+ const struct rcar_canfd_hw_info *info;
+ struct device *dev = &pdev->dev;
void __iomem *addr;
- u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
struct device_node *of_child;
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
- enum rcanfd_chip_id chip_id;
+ char name[9] = "channelX";
+ u32 ch, fcan_freq;
+ int i;
- chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ info = of_device_get_match_data(dev);
- if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ if (of_property_read_bool(dev->of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(0); /* Channel 0 */
-
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(1); /* Channel 1 */
+ for (i = 0; i < info->max_channels; ++i) {
+ name[7] = '0' + i;
+ of_child = of_get_available_child_by_name(dev->of_node, name);
+ if (of_child) {
+ channels_mask |= BIT(i);
+ transceivers[i] = devm_of_phy_optional_get(dev,
+ of_child, NULL);
+ of_node_put(of_child);
+ }
+ if (IS_ERR(transceivers[i]))
+ return PTR_ERR(transceivers[i]);
+ }
- if (chip_id == RENESAS_RCAR_GEN3) {
+ if (info->shared_global_irqs) {
ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
if (ch_irq < 0) {
/* For backward compatibility get irq by index */
@@ -1803,59 +2131,52 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
/* Global controller context */
- gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL);
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
- gpriv->chip_id = chip_id;
-
- if (gpriv->chip_id == RENESAS_RZG2L) {
- gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
- if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
- "failed to get rstp_n\n");
-
- gpriv->rstc2 = devm_reset_control_get_exclusive(&pdev->dev, "rstc_n");
- if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
- "failed to get rstc_n\n");
- }
+ gpriv->info = info;
+
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
+ if (IS_ERR(gpriv->rstc1))
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
+ "failed to get rstp_n\n");
+
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n");
+ if (IS_ERR(gpriv->rstc2))
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc2),
+ "failed to get rstc_n\n");
/* Peripheral clock */
- gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ gpriv->clkp = devm_clk_get(dev, "fck");
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
*/
- gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ gpriv->can_clk = devm_clk_get(dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
- gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
- gpriv->fcan = RCANFD_CANFDCLK;
+ gpriv->can_clk = devm_clk_get(dev, "canfd");
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+ /* CANFD clock may be further divided within the IP */
+ fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
- gpriv->fcan = RCANFD_EXTCLK;
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+ gpriv->extclk = gpriv->info->external_clk;
}
- fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
- /* CANFD clock is further divided by (1/2) within the IP */
- fcan_freq /= 2;
+ gpriv->clk_ram = devm_clk_get_optional(dev, "ram_clk");
+ if (IS_ERR(gpriv->clk_ram))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clk_ram),
+ "cannot get ram clock\n");
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -1863,163 +2184,158 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_dev;
}
gpriv->base = addr;
+ gpriv->fcbase = addr + gpriv->info->regs->coffset;
/* Request IRQ that's common for both channels */
- if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
- err = devm_request_irq(&pdev->dev, ch_irq,
+ if (info->shared_global_irqs) {
+ err = devm_request_irq(dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- ch_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ ch_irq, ERR_PTR(err));
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.g_int", gpriv);
+ err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt,
+ 0, "canfd.g_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_irq, ERR_PTR(err));
goto fail_dev;
}
} else {
- err = devm_request_irq(&pdev->dev, g_recc_irq,
+ err = devm_request_irq(dev, g_recc_irq,
rcar_canfd_global_receive_fifo_interrupt, 0,
"canfd.g_recc", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_recc_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_recc_irq, ERR_PTR(err));
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_err_irq,
+ err = devm_request_irq(dev, g_err_irq,
rcar_canfd_global_err_interrupt, 0,
"canfd.g_err", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_err_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_err_irq, ERR_PTR(err));
goto fail_dev;
}
}
- err = reset_control_reset(gpriv->rstc1);
+ err = rcar_canfd_global_init(gpriv);
if (err)
- goto fail_dev;
- err = reset_control_reset(gpriv->rstc2);
- if (err) {
- reset_control_assert(gpriv->rstc1);
- goto fail_dev;
- }
-
- /* Enable peripheral clock for register access */
- err = clk_prepare_enable(gpriv->clkp);
- if (err) {
- dev_err(&pdev->dev,
- "failed to enable peripheral clock, error %d\n", err);
- goto fail_reset;
- }
-
- err = rcar_canfd_reset_controller(gpriv);
- if (err) {
- dev_err(&pdev->dev, "reset controller failed\n");
- goto fail_clk;
- }
-
- /* Controller in Global reset & Channel reset mode */
- rcar_canfd_configure_controller(gpriv);
-
- /* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
- /* Configure Channel's Rx fifo */
- rcar_canfd_configure_rx(gpriv, ch);
-
- /* Configure Channel's Tx (Common) fifo */
- rcar_canfd_configure_tx(gpriv, ch);
-
- /* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
- }
-
- /* Configure common interrupts */
- rcar_canfd_enable_global_interrupts(gpriv);
-
- /* Start Global operation mode */
- rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
- RCANFD_GCTR_GMDC_GOPM);
-
- /* Verify mode change */
- err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
- !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
- if (err) {
- dev_err(&pdev->dev, "global operational mode failed\n");
goto fail_mode;
- }
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
- err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
+ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
+ err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq,
+ transceivers[ch]);
if (err)
goto fail_channel;
}
platform_set_drvdata(pdev, gpriv);
- dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
- gpriv->fcan, gpriv->fdmode);
+ dev_info(dev, "global operational state (%s clk, %s mode)\n",
+ gpriv->extclk ? "ext" : "canfd",
+ gpriv->fdmode ? "fd" : "classical");
return 0;
fail_channel:
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
- rcar_canfd_disable_global_interrupts(gpriv);
-fail_clk:
- clk_disable_unprepare(gpriv->clkp);
-fail_reset:
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
+ rcar_canfd_global_deinit(gpriv, false);
fail_dev:
return err;
}
-static int rcar_canfd_remove(struct platform_device *pdev)
+static void rcar_canfd_remove(struct platform_device *pdev)
{
struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
u32 ch;
- rcar_canfd_reset_controller(gpriv);
- rcar_canfd_disable_global_interrupts(gpriv);
-
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
- /* Enter global sleep mode */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
- clk_disable_unprepare(gpriv->clkp);
- reset_control_assert(gpriv->rstc1);
- reset_control_assert(gpriv->rstc2);
-
- return 0;
+ rcar_canfd_global_deinit(gpriv, true);
}
-static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+static int rcar_canfd_suspend(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ netif_device_detach(ndev);
+
+ err = rcar_canfd_close(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_close() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+ }
+
+	/* TODO: Skip this if wake-up (not yet supported) is enabled */
+ rcar_canfd_global_deinit(gpriv, false);
+
return 0;
}
-static int __maybe_unused rcar_canfd_resume(struct device *dev)
+static int rcar_canfd_resume(struct device *dev)
{
+ struct rcar_canfd_global *gpriv = dev_get_drvdata(dev);
+ int err;
+ u32 ch;
+
+ err = rcar_canfd_global_init(gpriv);
+ if (err) {
+ dev_err(dev, "rcar_canfd_global_init() failed %pe\n", ERR_PTR(err));
+ return err;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev))
+ continue;
+
+ err = rcar_canfd_open(ndev);
+ if (err) {
+ netdev_err(ndev, "rcar_canfd_open() failed %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ netif_device_attach(ndev);
+ }
+
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
- rcar_canfd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
- { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
- { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+ { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
+ { .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
+ { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
{ }
};
@@ -2029,7 +2345,7 @@ static struct platform_driver rcar_canfd_driver = {
.driver = {
.name = RCANFD_DRV_NAME,
.of_match_table = of_match_ptr(rcar_canfd_of_table),
- .pm = &rcar_canfd_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_canfd_pm_ops),
},
.probe = rcar_canfd_probe,
.remove = rcar_canfd_remove,
diff --git a/drivers/net/can/rockchip/Kconfig b/drivers/net/can/rockchip/Kconfig
new file mode 100644
index 000000000000..d203c530551f
--- /dev/null
+++ b/drivers/net/can/rockchip/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config CAN_ROCKCHIP_CANFD
+ tristate "Rockchip CAN-FD controller"
+ depends on OF
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select CAN_RX_OFFLOAD
+ help
+	  Say Y here if you want to use the CAN-FD controller found on
+ Rockchip SoCs.
diff --git a/drivers/net/can/rockchip/Makefile b/drivers/net/can/rockchip/Makefile
new file mode 100644
index 000000000000..3760d3e1baa3
--- /dev/null
+++ b/drivers/net/can/rockchip/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_ROCKCHIP_CANFD) += rockchip_canfd.o
+
+rockchip_canfd-objs :=
+rockchip_canfd-objs += rockchip_canfd-core.o
+rockchip_canfd-objs += rockchip_canfd-ethtool.o
+rockchip_canfd-objs += rockchip_canfd-rx.o
+rockchip_canfd-objs += rockchip_canfd-timestamp.o
+rockchip_canfd-objs += rockchip_canfd-tx.o
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
new file mode 100644
index 000000000000..29de0c01e4ed
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// Rockchip CANFD driver
+//
+// Copyright (c) 2020 Rockchip Electronics Co. Ltd.
+//
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+
+#include "rockchip_canfd.h"
+
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v2 = {
+ .model = RKCANFD_MODEL_RK3568V2,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_3 | RKCANFD_QUIRK_RK3568_ERRATUM_4 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_6 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_7 | RKCANFD_QUIRK_RK3568_ERRATUM_8 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_9 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+/* The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by erratum 5, but tests
+ * with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. In contrast to the errata sheet, mark the
+ * rk3568v3 as affected by erratum 5, too.
+ */
+static const struct rkcanfd_devtype_data rkcanfd_devtype_data_rk3568v3 = {
+ .model = RKCANFD_MODEL_RK3568V3,
+ .quirks = RKCANFD_QUIRK_RK3568_ERRATUM_1 | RKCANFD_QUIRK_RK3568_ERRATUM_2 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_5 | RKCANFD_QUIRK_RK3568_ERRATUM_7 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_8 | RKCANFD_QUIRK_RK3568_ERRATUM_10 |
+ RKCANFD_QUIRK_RK3568_ERRATUM_11 | RKCANFD_QUIRK_RK3568_ERRATUM_12 |
+ RKCANFD_QUIRK_CANFD_BROKEN,
+};
+
+static const char *__rkcanfd_get_model_str(enum rkcanfd_model model)
+{
+ switch (model) {
+ case RKCANFD_MODEL_RK3568V2:
+ return "rk3568v2";
+ case RKCANFD_MODEL_RK3568V3:
+ return "rk3568v3";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+rkcanfd_get_model_str(const struct rkcanfd_priv *priv)
+{
+ return __rkcanfd_get_model_str(priv->devtype_data.model);
+}
+
+/* Note:
+ *
+ * The formula to calculate the CAN System Clock is:
+ *
+ * Tsclk = 2 x Tclk x (brp + 1)
+ *
+ * Double the data sheet's brp_min, brp_max and brp_inc values (both
+ * for the arbitration and data bit timing) to take the "2 x" into
+ * account.
+ */
+static const struct can_bittiming_const rkcanfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
+
+static const struct can_bittiming_const rkcanfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 2, /* value from data sheet x2 */
+ .brp_max = 512, /* value from data sheet x2 */
+ .brp_inc = 2, /* value from data sheet x2 */
+};
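+
+/* Worked example for the "2 x" scaling above (illustrative only): with
+ * the doubled constants, the brp computed by the bit-timing core scales
+ * Tclk directly. For brp == 6, rkcanfd_set_bittiming() below programs:
+ *
+ *   reg_brp = (6 / 2) - 1 == 2;
+ *
+ * and the resulting CAN System Clock period is:
+ *
+ *   Tsclk = 2 * Tclk * (reg_brp + 1) == 6 * Tclk;
+ */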
+
+static void rkcanfd_chip_set_reset_mode(const struct rkcanfd_priv *priv)
+{
+ reset_control_assert(priv->reset);
+ udelay(2);
+ reset_control_deassert(priv->reset);
+
+ rkcanfd_write(priv, RKCANFD_REG_MODE, 0x0);
+}
+
+static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_nbt, reg_dbt, reg_tdc;
+ u32 tdco;
+
+ reg_nbt = FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW,
+ bt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP,
+ (bt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
+ bt->prop_seg + bt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_NOMINAL_BITTIMING, reg_nbt);
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ reg_dbt = FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_SJW,
+ dbt->sjw - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_BRP,
+ (dbt->brp / 2) - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG2,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(RKCANFD_REG_FD_DATA_BITTIMING_TSEG1,
+ dbt->prop_seg + dbt->phase_seg1 - 1);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_DATA_BITTIMING, reg_dbt);
+
+ tdco = (priv->can.clock.freq / dbt->bitrate) * 2 / 3;
+ tdco = min(tdco, FIELD_MAX(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET));
+
+ reg_tdc = FIELD_PREP(RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET, tdco) |
+ RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION,
+ reg_tdc);
+
+ return 0;
+}
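+
+/* Worked TDC example (illustrative only): assuming a 300 MHz CAN clock
+ * and a 2 Mbit/s data bitrate, one data bit is 150 clocks long, so the
+ * calculation above yields:
+ *
+ *   tdco = 150 * 2 / 3 == 100;
+ *
+ * i.e. the secondary sample point sits at 2/3 of a data bit, subject to
+ * the FIELD_MAX() clamp.
+ */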
+
+static void rkcanfd_get_berr_counter_corrected(struct rkcanfd_priv *priv,
+ struct can_berr_counter *bec)
+{
+ struct can_berr_counter bec_raw;
+ u32 reg_state;
+
+ bec->rxerr = rkcanfd_read(priv, RKCANFD_REG_RXERRORCNT);
+ bec->txerr = rkcanfd_read(priv, RKCANFD_REG_TXERRORCNT);
+ bec_raw = *bec;
+
+ /* Tests show that sometimes both CAN bus error counters read
+ * 0x0, even if the controller is in warning mode
+ * (RKCANFD_REG_STATE_ERROR_WARNING_STATE in RKCANFD_REG_STATE
+ * set).
+ *
+	 * In case both error counters read 0x0, use the cached values
+	 * in priv->bec; otherwise save the read values to priv->bec.
+ *
+ * rkcanfd_handle_rx_int_one() handles the decrementing of
+ * priv->bec.rxerr for successfully RX'ed CAN frames.
+ *
+ * Luckily the controller doesn't decrement the RX CAN bus
+	 * error counter in hardware for self-received TX'ed CAN
+ * frames (RKCANFD_REG_MODE_RXSTX_MODE), so RXSTX doesn't
+ * interfere with proper RX CAN bus error counters.
+ *
+ * rkcanfd_handle_tx_done_one() handles the decrementing of
+ * priv->bec.txerr for successfully TX'ed CAN frames.
+ */
+ if (!bec->rxerr && !bec->txerr)
+ *bec = priv->bec;
+ else
+ priv->bec = *bec;
+
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+ netdev_vdbg(priv->ndev,
+ "%s: Raw/Cor: txerr=%3u/%3u rxerr=%3u/%3u Bus Off=%u Warning=%u\n",
+ __func__,
+ bec_raw.txerr, bec->txerr, bec_raw.rxerr, bec->rxerr,
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE));
+}
+
+static int rkcanfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ return err;
+
+ rkcanfd_get_berr_counter_corrected(priv, bec);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static void rkcanfd_chip_interrupts_enable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, priv->reg_int_mask_default);
+
+ netdev_dbg(priv->ndev, "%s: reg_int_mask=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_INT_MASK));
+}
+
+static void rkcanfd_chip_interrupts_disable(const struct rkcanfd_priv *priv)
+{
+ rkcanfd_write(priv, RKCANFD_REG_INT_MASK, RKCANFD_REG_INT_ALL);
+}
+
+static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ /* RX FIFO */
+ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+ reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
+
+ WRITE_ONCE(priv->tx_head, 0);
+ WRITE_ONCE(priv->tx_tail, 0);
+ netdev_reset_queue(priv->ndev);
+}
+
+static void rkcanfd_chip_start(struct rkcanfd_priv *priv)
+{
+ u32 reg;
+
+ rkcanfd_chip_set_reset_mode(priv);
+
+ /* Receiving Filter: accept all */
+ rkcanfd_write(priv, RKCANFD_REG_IDCODE, 0x0);
+ rkcanfd_write(priv, RKCANFD_REG_IDMASK, RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID);
+
+ /* enable:
+ * - CAN_FD: enable CAN-FD
+ * - AUTO_RETX_MODE: auto retransmission on TX error
+ * - COVER_MODE: RX-FIFO overwrite mode, do not send OVERLOAD frames
+ * - RXSTX_MODE: Receive Self Transmit data mode
+ * - WORK_MODE: transition from reset to working mode
+ */
+ reg = rkcanfd_read(priv, RKCANFD_REG_MODE);
+ priv->reg_mode_default = reg |
+ RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE |
+ RKCANFD_REG_MODE_AUTO_RETX_MODE |
+ RKCANFD_REG_MODE_COVER_MODE |
+ RKCANFD_REG_MODE_RXSTX_MODE |
+ RKCANFD_REG_MODE_WORK_MODE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ priv->reg_mode_default |= RKCANFD_REG_MODE_LBACK_MODE |
+ RKCANFD_REG_MODE_SILENT_MODE |
+ RKCANFD_REG_MODE_SELF_TEST;
+
+ /* mask, i.e. ignore:
+ * - TIMESTAMP_COUNTER_OVERFLOW_INT - timestamp counter overflow interrupt
+ * - TX_ARBIT_FAIL_INT - TX arbitration fail interrupt
+ * - OVERLOAD_INT - CAN bus overload interrupt
+ * - TX_FINISH_INT - Transmit finish interrupt
+ */
+ priv->reg_int_mask_default =
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT |
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT |
+ RKCANFD_REG_INT_OVERLOAD_INT |
+ RKCANFD_REG_INT_TX_FINISH_INT;
+
+ /* Do not mask the bus error interrupt if the bus error
+ * reporting is requested.
+ */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ priv->reg_int_mask_default |= RKCANFD_REG_INT_ERROR_INT;
+
+ memset(&priv->bec, 0x0, sizeof(priv->bec));
+
+ rkcanfd_chip_fifo_setup(priv);
+ rkcanfd_timestamp_init(priv);
+ rkcanfd_timestamp_start(priv);
+
+ rkcanfd_set_bittiming(priv);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ rkcanfd_chip_set_work_mode(priv);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netdev_dbg(priv->ndev, "%s: reg_mode=0x%08x\n", __func__,
+ rkcanfd_read(priv, RKCANFD_REG_MODE));
+}
+
+static void __rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_chip_set_reset_mode(priv);
+ rkcanfd_chip_interrupts_disable(priv);
+}
+
+static void rkcanfd_chip_stop(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static void rkcanfd_chip_stop_sync(struct rkcanfd_priv *priv, const enum can_state state)
+{
+ priv->can.state = state;
+
+ rkcanfd_timestamp_stop_sync(priv);
+ __rkcanfd_chip_stop(priv, state);
+}
+
+static int rkcanfd_set_mode(struct net_device *ndev,
+ enum can_mode mode)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ rkcanfd_chip_start(priv);
+ rkcanfd_chip_interrupts_enable(priv);
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+rkcanfd_alloc_can_err_skb(struct rkcanfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ struct sk_buff *skb;
+
+ *timestamp = rkcanfd_get_timestamp(priv);
+
+ skb = alloc_can_err_skb(priv->ndev, cf);
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, *timestamp);
+
+ return skb;
+}
+
+static const char *rkcanfd_get_error_type_str(unsigned int type)
+{
+ switch (type) {
+ case RKCANFD_REG_ERROR_CODE_TYPE_BIT:
+ return "Bit";
+ case RKCANFD_REG_ERROR_CODE_TYPE_STUFF:
+ return "Stuff";
+ case RKCANFD_REG_ERROR_CODE_TYPE_FORM:
+ return "Form";
+ case RKCANFD_REG_ERROR_CODE_TYPE_ACK:
+ return "ACK";
+ case RKCANFD_REG_ERROR_CODE_TYPE_CRC:
+ return "CRC";
+ }
+
+ return "<unknown>";
+}
+
+#define RKCAN_ERROR_CODE(reg_ec, code) \
+ ((reg_ec) & RKCANFD_REG_ERROR_CODE_##code ? __stringify(code) " " : "")
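+
+/* For illustration, RKCAN_ERROR_CODE(reg_ec, TX_ACK) expands to:
+ *
+ *   ((reg_ec) & RKCANFD_REG_ERROR_CODE_TX_ACK ? "TX_ACK " : "")
+ *
+ * so every set error-code bit contributes its stringified name to the
+ * netdev_dbg() format below, while clear bits contribute "".
+ */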
+
+static void
+rkcanfd_handle_error_int_reg_ec(struct rkcanfd_priv *priv, struct can_frame *cf,
+ const u32 reg_ec)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int type;
+ u32 reg_state, reg_cmd;
+
+ type = FIELD_GET(RKCANFD_REG_ERROR_CODE_TYPE, reg_ec);
+ reg_cmd = rkcanfd_read(priv, RKCANFD_REG_CMD);
+ reg_state = rkcanfd_read(priv, RKCANFD_REG_STATE);
+
+ netdev_dbg(priv->ndev, "%s Error in %s %s Phase: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s(0x%08x) CMD=%u RX=%u TX=%u Error-Warning=%u Bus-Off=%u\n",
+ rkcanfd_get_error_type_str(type),
+ reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX ? "RX" : "TX",
+ reg_ec & RKCANFD_REG_ERROR_CODE_PHASE ? "Data" : "Arbitration",
+ RKCAN_ERROR_CODE(reg_ec, TX_OVERLOAD),
+ RKCAN_ERROR_CODE(reg_ec, TX_ERROR),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, TX_ACK_EOF),
+ RKCAN_ERROR_CODE(reg_ec, TX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, TX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, TX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, TX_SOF_DLC),
+ RKCAN_ERROR_CODE(reg_ec, TX_IDLE),
+ RKCAN_ERROR_CODE(reg_ec, RX_BUF_INT),
+ RKCAN_ERROR_CODE(reg_ec, RX_SPACE),
+ RKCAN_ERROR_CODE(reg_ec, RX_EOF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_ACK),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC_LIM),
+ RKCAN_ERROR_CODE(reg_ec, RX_CRC),
+ RKCAN_ERROR_CODE(reg_ec, RX_STUFF_COUNT),
+ RKCAN_ERROR_CODE(reg_ec, RX_DATA),
+ RKCAN_ERROR_CODE(reg_ec, RX_DLC),
+ RKCAN_ERROR_CODE(reg_ec, RX_BRS_ESI),
+ RKCAN_ERROR_CODE(reg_ec, RX_RES),
+ RKCAN_ERROR_CODE(reg_ec, RX_FDF),
+ RKCAN_ERROR_CODE(reg_ec, RX_ID2_RTR),
+ RKCAN_ERROR_CODE(reg_ec, RX_SOF_IDE),
+ RKCAN_ERROR_CODE(reg_ec, RX_IDLE),
+ reg_ec, reg_cmd,
+ !!(reg_state & RKCANFD_REG_STATE_RX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_TX_PERIOD),
+ !!(reg_state & RKCANFD_REG_STATE_ERROR_WARNING_STATE),
+ !!(reg_state & RKCANFD_REG_STATE_BUS_OFF_STATE));
+
+ priv->can.can_stats.bus_error++;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX)
+ stats->rx_errors++;
+ else
+ stats->tx_errors++;
+
+ if (!cf)
+ return;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_DIRECTION_RX) {
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SOF_IDE)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ID2_RTR)
+ cf->data[3] = CAN_ERR_PROT_LOC_RTR;
+ /* RKCANFD_REG_ERROR_CODE_RX_FDF */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_RES)
+ cf->data[3] = CAN_ERR_PROT_LOC_RES0;
+ /* RKCANFD_REG_ERROR_CODE_RX_BRS_ESI */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_DLC;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_CRC_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_ACK_LIM)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_SPACE)
+ cf->data[3] = CAN_ERR_PROT_LOC_EOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_RX_BUF_INT)
+ cf->data[3] = CAN_ERR_PROT_LOC_INTERM;
+ } else {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_SOF_DLC)
+ cf->data[3] = CAN_ERR_PROT_LOC_SOF;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_DATA)
+ cf->data[3] = CAN_ERR_PROT_LOC_DATA;
+ /* RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_CRC)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK_EOF)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_ACK)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ /* RKCANFD_REG_ERROR_CODE_TX_ERROR */
+ else if (reg_ec & RKCANFD_REG_ERROR_CODE_TX_OVERLOAD)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ switch (reg_ec & RKCANFD_REG_ERROR_CODE_TYPE) {
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_BIT):
+
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_STUFF):
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_FORM):
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK):
+ cf->can_id |= CAN_ERR_ACK;
+ break;
+ case FIELD_PREP_CONST(RKCANFD_REG_ERROR_CODE_TYPE,
+ RKCANFD_REG_ERROR_CODE_TYPE_CRC):
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+}
+
+static int rkcanfd_handle_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf = NULL;
+ u32 reg_ec, timestamp;
+ struct sk_buff *skb;
+ int err;
+
+ reg_ec = rkcanfd_read(priv, RKCANFD_REG_ERROR_CODE);
+
+ if (!reg_ec)
+ return 0;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (cf) {
+ struct can_berr_counter bec;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ }
+
+ rkcanfd_handle_error_int_reg_ec(priv, cf, reg_ec);
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int rkcanfd_handle_state_error_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ enum can_state new_state, rx_state, tx_state;
+ struct net_device *ndev = priv->ndev;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ rkcanfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_berr_counter bec;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ u32 timestamp;
+ int err;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
+
+ skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ rkcanfd_get_berr_counter_corrected(priv, &bec);
+
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+#define rkcanfd_handle(priv, irq, ...) \
+({ \
+ struct rkcanfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = rkcanfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler rkcanfd_handle_%s() returned error: %pe\n", \
+ __stringify(irq), ERR_PTR(err)); \
+ err; \
+})
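+
+/* As a GNU statement expression, rkcanfd_handle() evaluates to err, so
+ * a caller could also consume the result, e.g. (sketch):
+ *
+ *   err = rkcanfd_handle(priv, rx_int);
+ *
+ * The IRQ handler below only relies on its error-logging side effect
+ * and discards the value.
+ */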
+
+static irqreturn_t rkcanfd_irq(int irq, void *dev_id)
+{
+ struct rkcanfd_priv *priv = dev_id;
+ u32 reg_int_unmasked, reg_int;
+
+ reg_int_unmasked = rkcanfd_read(priv, RKCANFD_REG_INT);
+ reg_int = reg_int_unmasked & ~priv->reg_int_mask_default;
+
+ if (!reg_int)
+ return IRQ_NONE;
+
+	/* ACK first, then handle, to avoid a lost-IRQ race with fast
+	 * re-occurring interrupts.
+	 */
+ rkcanfd_write(priv, RKCANFD_REG_INT, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FINISH_INT)
+ rkcanfd_handle(priv, rx_int);
+
+ if (reg_int & RKCANFD_REG_INT_ERROR_INT)
+ rkcanfd_handle(priv, error_int);
+
+ if (reg_int & (RKCANFD_REG_INT_BUS_OFF_INT |
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT |
+ RKCANFD_REG_INT_ERROR_WARNING_INT) ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE)
+ rkcanfd_handle(priv, state_error_int);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT)
+ rkcanfd_handle(priv, rx_fifo_overflow_int);
+
+ if (reg_int & ~(RKCANFD_REG_INT_ALL_ERROR |
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT |
+ RKCANFD_REG_INT_RX_FINISH_INT))
+ netdev_err(priv->ndev, "%s: int=0x%08x\n", __func__, reg_int);
+
+ if (reg_int & RKCANFD_REG_INT_WAKEUP_INT)
+ netdev_info(priv->ndev, "%s: WAKEUP_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_TXE_FIFO_OV_INT)
+ netdev_info(priv->ndev, "%s: TXE_FIFO_OV_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT)
+ netdev_info(priv->ndev, "%s: BUS_OFF_RECOVERY_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_RX_FIFO_FULL_INT)
+ netdev_info(priv->ndev, "%s: RX_FIFO_FULL_INT\n", __func__);
+
+ if (reg_int & RKCANFD_REG_INT_OVERLOAD_INT)
+ netdev_info(priv->ndev, "%s: OVERLOAD_INT\n", __func__);
+
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int rkcanfd_open(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = open_candev(ndev);
+ if (err)
+ return err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_close_candev;
+
+ rkcanfd_chip_start(priv);
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_irq(ndev->irq, rkcanfd_irq, IRQF_SHARED, ndev->name, priv);
+ if (err)
+ goto out_rkcanfd_chip_stop;
+
+ rkcanfd_chip_interrupts_enable(priv);
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+out_rkcanfd_chip_stop:
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+out_close_candev:
+ close_candev(ndev);
+ return err;
+}
+
+static int rkcanfd_stop(struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ rkcanfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ rkcanfd_chip_stop_sync(priv, CAN_STATE_STOPPED);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops rkcanfd_netdev_ops = {
+ .ndo_open = rkcanfd_open,
+ .ndo_stop = rkcanfd_stop,
+ .ndo_start_xmit = rkcanfd_start_xmit,
+};
+
+static int __maybe_unused rkcanfd_runtime_suspend(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(priv->clks_num, priv->clks);
+
+ return 0;
+}
+
+static int __maybe_unused rkcanfd_runtime_resume(struct device *dev)
+{
+ struct rkcanfd_priv *priv = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(priv->clks_num, priv->clks);
+}
+
+static void rkcanfd_register_done(const struct rkcanfd_priv *priv)
+{
+ u32 dev_id;
+
+ dev_id = rkcanfd_read(priv, RKCANFD_REG_RTL_VERSION);
+
+ netdev_info(priv->ndev,
+ "Rockchip-CANFD %s rev%lu.%lu (errata 0x%04x) found\n",
+ rkcanfd_get_model_str(priv),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MAJOR, dev_id),
+ FIELD_GET(RKCANFD_REG_RTL_VERSION_MINOR, dev_id),
+ priv->devtype_data.quirks);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_5 &&
+ priv->can.clock.freq < RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN)
+ netdev_info(priv->ndev,
+ "Erratum 5: CAN clock frequency (%luMHz) lower than known good (%luMHz), expect degraded performance\n",
+ priv->can.clock.freq / MEGA,
+ RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN / MEGA);
+}
+
+static int rkcanfd_register(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ pm_runtime_enable(ndev->dev.parent);
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err)
+ goto out_pm_runtime_disable;
+
+ rkcanfd_ethtool_init(priv);
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put_sync;
+
+ rkcanfd_register_done(priv);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+out_pm_runtime_put_sync:
+ pm_runtime_put_sync(ndev->dev.parent);
+out_pm_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+
+ return err;
+}
+
+static inline void rkcanfd_unregister(struct rkcanfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id rkcanfd_of_match[] = {
+ {
+ .compatible = "rockchip,rk3568v2-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v2,
+ }, {
+ .compatible = "rockchip,rk3568v3-canfd",
+ .data = &rkcanfd_devtype_data_rk3568v3,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, rkcanfd_of_match);
+
+static int rkcanfd_probe(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv;
+ struct net_device *ndev;
+ const void *match;
+ int err;
+
+ ndev = alloc_candev(sizeof(struct rkcanfd_priv), RKCANFD_TXFIFO_DEPTH);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
+ goto out_free_candev;
+ }
+
+ priv->clks_num = devm_clk_bulk_get_all(&pdev->dev, &priv->clks);
+ if (priv->clks_num < 0) {
+ err = priv->clks_num;
+ goto out_free_candev;
+ }
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_free_candev;
+ }
+
+ priv->reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(priv->reset)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+ "Failed to get reset line\n");
+ goto out_free_candev;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->netdev_ops = &rkcanfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+
+ platform_set_drvdata(pdev, priv);
+ priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
+ priv->can.bittiming_const = &rkcanfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_BERR_REPORTING;
+ priv->can.do_set_mode = rkcanfd_set_mode;
+ priv->can.do_get_berr_counter = rkcanfd_get_berr_counter;
+ priv->ndev = ndev;
+
+ match = device_get_match_data(&pdev->dev);
+ if (match) {
+ priv->devtype_data = *(struct rkcanfd_devtype_data *)match;
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_CANFD_BROKEN))
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ }
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ RKCANFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = rkcanfd_register(priv);
+ if (err)
+ goto out_can_rx_offload_del;
+
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+
+ return err;
+}
+
+static void rkcanfd_remove(struct platform_device *pdev)
+{
+ struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ rkcanfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+}
+
+static const struct dev_pm_ops rkcanfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(rkcanfd_runtime_suspend,
+ rkcanfd_runtime_resume, NULL)
+};
+
+static struct platform_driver rkcanfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &rkcanfd_pm_ops,
+ .of_match_table = rkcanfd_of_match,
+ },
+ .probe = rkcanfd_probe,
+ .remove = rkcanfd_remove,
+};
+module_platform_driver(rkcanfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Rockchip CAN-FD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/rockchip/rockchip_canfd-ethtool.c b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
new file mode 100644
index 000000000000..5aeeef64a67a
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-ethtool.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "rockchip_canfd.h"
+
+enum rkcanfd_stats_type {
+ RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS,
+ RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS,
+};
+
+static const char rkcanfd_stats_strings[][ETH_GSTRING_LEN] = {
+ [RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] = "rx_fifo_empty_errors",
+ [RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] = "tx_extended_as_standard_errors",
+};
+
+static void
+rkcanfd_ethtool_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, rkcanfd_stats_strings,
+ sizeof(rkcanfd_stats_strings));
+ }
+}
+
+static int rkcanfd_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rkcanfd_stats_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+rkcanfd_ethtool_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ struct rkcanfd_stats *rkcanfd_stats;
+ unsigned int start;
+
+ rkcanfd_stats = &priv->stats;
+
+ do {
+ start = u64_stats_fetch_begin(&rkcanfd_stats->syncp);
+
+ data[RKCANFD_STATS_TYPE_RX_FIFO_EMPTY_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->rx_fifo_empty_errors);
+ data[RKCANFD_STATS_TYPE_TX_EXTENDED_AS_STANDARD_ERRORS] =
+ u64_stats_read(&rkcanfd_stats->tx_extended_as_standard_errors);
+ } while (u64_stats_fetch_retry(&rkcanfd_stats->syncp, start));
+}
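+
+/* Writer-side counterpart to the fetch/retry loop above. A sketch with
+ * a hypothetical helper name; the real increments live in
+ * rockchip_canfd-rx.c:
+ */
+static inline void __maybe_unused
+rkcanfd_stats_inc_rx_fifo_empty(struct rkcanfd_priv *priv)
+{
+	struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+
+	u64_stats_update_begin(&rkcanfd_stats->syncp);
+	u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors);
+	u64_stats_update_end(&rkcanfd_stats->syncp);
+}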
+
+static const struct ethtool_ops rkcanfd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .get_strings = rkcanfd_ethtool_get_strings,
+ .get_sset_count = rkcanfd_ethtool_get_sset_count,
+ .get_ethtool_stats = rkcanfd_ethtool_get_ethtool_stats,
+};
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv)
+{
+ priv->ndev->ethtool_ops = &rkcanfd_ethtool_ops;
+
+ u64_stats_init(&priv->stats.syncp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-rx.c b/drivers/net/can/rockchip/rockchip_canfd-rx.c
new file mode 100644
index 000000000000..475c0409e215
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-rx.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_can_frame_header_equal(const struct canfd_frame *const cfd1,
+ const struct canfd_frame *const cfd2,
+ const bool is_canfd)
+{
+ const u8 mask_flags = CANFD_BRS | CANFD_ESI | CANFD_FDF;
+ canid_t mask = CAN_EFF_FLAG;
+
+ if (canfd_sanitize_len(cfd1->len) != canfd_sanitize_len(cfd2->len))
+ return false;
+
+ if (!is_canfd)
+ mask |= CAN_RTR_FLAG;
+
+ if (cfd1->can_id & CAN_EFF_FLAG)
+ mask |= CAN_EFF_MASK;
+ else
+ mask |= CAN_SFF_MASK;
+
+ if ((cfd1->can_id & mask) != (cfd2->can_id & mask))
+ return false;
+
+ if (is_canfd &&
+ (cfd1->flags & mask_flags) != (cfd2->flags & mask_flags))
+ return false;
+
+ return true;
+}
+
+static bool rkcanfd_can_frame_data_equal(const struct canfd_frame *cfd1,
+ const struct canfd_frame *cfd2,
+ const bool is_canfd)
+{
+ u8 len;
+
+ if (!is_canfd && (cfd1->can_id & CAN_RTR_FLAG))
+ return true;
+
+ len = canfd_sanitize_len(cfd1->len);
+
+ return !memcmp(cfd1->data, cfd2->data, len);
+}
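+
+/* Example (illustrative only): a pending TX'ed SFF frame matches an
+ * RX'ed one iff the CAN-ID under the relevant mask (CAN_SFF_MASK plus
+ * the EFF/RTR flags), the sanitized length and, for non-RTR frames,
+ * all data bytes compare equal. The RXSTX self-reception filter below
+ * uses exactly this pair of helpers to detect TX completion.
+ */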
+
+static unsigned int
+rkcanfd_fifo_header_to_cfd_header(const struct rkcanfd_priv *priv,
+ const struct rkcanfd_fifo_header *header,
+ struct canfd_frame *cfd)
+{
+ unsigned int len = sizeof(*cfd) - sizeof(cfd->data);
+ u8 dlc;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT)
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_EFF, header->id) |
+ CAN_EFF_FLAG;
+ else
+ cfd->can_id = FIELD_GET(RKCANFD_REG_FD_ID_SFF, header->id);
+
+ dlc = FIELD_GET(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ header->frameinfo);
+
+ /* CAN-FD */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF) {
+ cfd->len = can_fd_dlc2len(dlc);
+
+ /* The cfd is not allocated by alloc_canfd_skb(), so
+ * set CANFD_FDF here.
+ */
+ cfd->flags |= CANFD_FDF;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = can_cc_dlc2len(dlc);
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_RTR) {
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ return len;
+ }
+ }
+
+ return len + cfd->len;
+}
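+
+/* DLC example (illustrative only): CAN-FD DLC values above 8 map
+ * non-linearly to payload lengths, e.g.:
+ *
+ *   can_fd_dlc2len(9)  == 12;
+ *   can_fd_dlc2len(15) == 64;
+ *
+ * while can_cc_dlc2len() caps Classical CAN frames at 8 bytes.
+ */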
+
+static int rkcanfd_rxstx_filter(struct rkcanfd_priv *priv,
+ const struct canfd_frame *cfd_rx, const u32 ts,
+ bool *tx_done)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+ const struct canfd_frame *cfd_nominal;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return -ENOMSG;
+ }
+ cfd_nominal = (struct canfd_frame *)skb->data;
+
+ /* We RX'ed a frame identical to our pending TX frame. */
+ if (rkcanfd_can_frame_header_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF) &&
+ rkcanfd_can_frame_data_equal(cfd_rx, cfd_nominal,
+ cfd_rx->flags & CANFD_FDF)) {
+ unsigned int frame_len;
+
+ rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
+
+ WRITE_ONCE(priv->tx_tail, priv->tx_tail + 1);
+ netif_subqueue_completed_wake(priv->ndev, 0, 1, frame_len,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_START_THRESHOLD);
+
+ *tx_done = true;
+
+ return 0;
+ }
+
+ if (!(priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6))
+ return 0;
+
+	/* Erratum 6: Extended frames may be sent as standard frames.
+ *
+ * Not affected if:
+ * - TX'ed a standard frame -or-
+ * - RX'ed an extended frame
+ */
+ if (!(cfd_nominal->can_id & CAN_EFF_FLAG) ||
+ (cfd_rx->can_id & CAN_EFF_FLAG))
+ return 0;
+
+ /* Not affected if:
+	 * - the standard ID part and RTR flag of the TX'ed frame are
+	 * not equal to the CAN-ID and RTR flag of the RX'ed frame.
+ */
+ if ((cfd_nominal->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)) !=
+ (cfd_rx->can_id & (CAN_RTR_FLAG | CAN_SFF_MASK)))
+ return 0;
+
+ /* Not affected if:
+ * - length is not the same
+ */
+ if (cfd_nominal->len != cfd_rx->len)
+ return 0;
+
+ /* Not affected if:
+ * - the data of non RTR frames is different
+ */
+ if (!(cfd_nominal->can_id & CAN_RTR_FLAG) &&
+ memcmp(cfd_nominal->data, cfd_rx->data, cfd_nominal->len))
+ return 0;
+
+ /* Affected by Erratum 6 */
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->tx_extended_as_standard_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ /* Manual handling of CAN Bus Error counters. See
+	 * rkcanfd_get_berr_counter_corrected() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ *tx_done = true;
+
+ stats->tx_packets++;
+ stats->tx_errors++;
+
+ rkcanfd_xmit_retry(priv);
+
+ return 0;
+}
+
+static inline bool
+rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
+{
+ /* Erratum 5: If the FIFO is empty, we read the same value for
+ * all elements.
+ */
+ return header->frameinfo == header->id &&
+ header->frameinfo == header->ts;
+}
+
+static int rkcanfd_handle_rx_int_one(struct rkcanfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame cfd[1] = { }, *skb_cfd;
+ struct rkcanfd_fifo_header header[1] = { };
+ struct sk_buff *skb;
+ unsigned int len;
+ int err;
+
+ /* read header into separate struct and convert it later */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ header, sizeof(*header));
+ /* read data directly into cfd */
+ rkcanfd_read_rep(priv, RKCANFD_REG_RX_FIFO_RDATA,
+ cfd->data, sizeof(cfd->data));
+
+ /* Erratum 5: Counters for TXEFIFO and RXFIFO may be wrong */
+ if (rkcanfd_fifo_header_empty(header)) {
+ struct rkcanfd_stats *rkcanfd_stats = &priv->stats;
+
+ u64_stats_update_begin(&rkcanfd_stats->syncp);
+ u64_stats_inc(&rkcanfd_stats->rx_fifo_empty_errors);
+ u64_stats_update_end(&rkcanfd_stats->syncp);
+
+ return 0;
+ }
+
+ len = rkcanfd_fifo_header_to_cfd_header(priv, header, cfd);
+
+ /* Drop any received CAN-FD frames if CAN-FD mode is not
+ * requested.
+ */
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ if (rkcanfd_get_tx_pending(priv)) {
+ bool tx_done = false;
+
+ err = rkcanfd_rxstx_filter(priv, cfd, header->ts, &tx_done);
+ if (err)
+ return err;
+ if (tx_done && !(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK))
+ return 0;
+ }
+
+ /* Manual handling of CAN Bus Error counters. See
+	 * rkcanfd_get_berr_counter_corrected() for detailed
+ * explanation.
+ */
+ if (priv->bec.rxerr)
+ priv->bec.rxerr = min(CAN_ERROR_PASSIVE_THRESHOLD,
+ priv->bec.rxerr) - 1;
+
+ if (header->frameinfo & RKCANFD_REG_FD_FRAMEINFO_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &skb_cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&skb_cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+
+ return 0;
+ }
+
+ memcpy(skb_cfd, cfd, len);
+ rkcanfd_skb_set_timestamp(priv, skb, header->ts);
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, header->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline unsigned int
+rkcanfd_rx_fifo_get_len(const struct rkcanfd_priv *priv)
+{
+ const u32 reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
+
+ return FIELD_GET(RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT, reg);
+}
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv)
+{
+ unsigned int len;
+ int err;
+
+ while ((len = rkcanfd_rx_fifo_get_len(priv))) {
+ err = rkcanfd_handle_rx_int_one(priv);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
new file mode 100644
index 000000000000..72774cd2f94b
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/clocksource.h>
+
+#include "rockchip_canfd.h"
+
+static u64 rkcanfd_timestamp_read(struct cyclecounter *cc)
+{
+ const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);
+
+ return rkcanfd_get_timestamp(priv);
+}
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, timestamp);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void rkcanfd_timestamp_work(struct work_struct *work)
+{
+ const struct delayed_work *delayed_work = to_delayed_work(work);
+ struct rkcanfd_priv *priv;
+
+ priv = container_of(delayed_work, struct rkcanfd_priv, timestamp);
+ timecounter_read(&priv->tc);
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
+{
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ struct cyclecounter *cc = &priv->cc;
+ u32 bitrate, div, reg, rate;
+ u64 work_delay_ns;
+ u64 max_cycles;
+
+	/* At the standard clock rate of 300 MHz on the rk3568, the 32
+	 * bit timer overflows every 14 s. This means that we have to
+ * poll it quite often to avoid missing a wrap around.
+ *
+ * Divide it down to a reasonable rate, at least twice the bit
+ * rate.
+ */
+ bitrate = max(bt->bitrate, dbt->bitrate);
+ div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2),
+ FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1);
+
+ reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE,
+ div - 1) |
+ RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE;
+ rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg);
+
+ cc->read = rkcanfd_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+
+ rate = priv->can.clock.freq / div;
+ clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC,
+ RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC);
+
+ max_cycles = div_u64(ULLONG_MAX, cc->mult);
+ max_cycles = min(max_cycles, cc->mask);
+ work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift);
+ priv->work_delay_jiffies = div_u64(work_delay_ns, 3u * NSEC_PER_SEC / HZ);
+ INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);
+
+ netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n",
+ priv->can.clock.freq / MEGA,
+ priv->can.clock.freq % MEGA / KILO / 10,
+ bitrate / MEGA,
+ bitrate % MEGA / KILO / 100,
+ div,
+ rate / MEGA,
+ rate % MEGA / KILO / 10,
+ cc->mult, cc->shift,
+ priv->work_delay_jiffies / HZ);
+}
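+
+/* Worked example (illustrative only): with a 300 MHz clock and a
+ * maximum bitrate of 2 Mbit/s, and assuming the prescaler field is
+ * wide enough:
+ *
+ *   div  = DIV_ROUND_UP(300 MHz, 2 * 2 MHz) == 75;
+ *   rate = 300 MHz / 75 == 4 MHz;
+ *
+ * so the 32 bit counter wraps roughly every 1073 s instead of every
+ * 14 s, and the "3u *" in the delay calculation makes the delayed
+ * work read the timecounter about three times per overflow-safe
+ * window.
+ */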
+
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
+
+ schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
+}
+
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work(&priv->timestamp);
+}
+
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->timestamp);
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
new file mode 100644
index 000000000000..12200dcfd338
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2023, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <net/netdev_queues.h>
+
+#include "rockchip_canfd.h"
+
+static bool rkcanfd_tx_tail_is_eff(const struct rkcanfd_priv *priv)
+{
+ const struct canfd_frame *cfd;
+ const struct sk_buff *skb;
+ unsigned int tx_tail;
+
+ if (!rkcanfd_get_tx_pending(priv))
+ return false;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+ if (!skb) {
+ netdev_err(priv->ndev,
+ "%s: echo_skb[%u]=NULL tx_head=0x%08x tx_tail=0x%08x\n",
+ __func__, tx_tail,
+ priv->tx_head, priv->tx_tail);
+
+ return false;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ return cfd->can_id & CAN_EFF_FLAG;
+}
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_6 &&
+ rkcanfd_tx_tail_is_eff(priv))
+ return 0;
+
+ return rkcanfd_get_tx_free(priv);
+}
+
+static void rkcanfd_start_xmit_write_cmd(const struct rkcanfd_priv *priv,
+ const u32 reg_cmd)
+{
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default |
+ RKCANFD_REG_MODE_SPACE_RX_MODE);
+
+ rkcanfd_write(priv, RKCANFD_REG_CMD, reg_cmd);
+
+ if (priv->devtype_data.quirks & RKCANFD_QUIRK_RK3568_ERRATUM_12)
+ rkcanfd_write(priv, RKCANFD_REG_MODE, priv->reg_mode_default);
+}
+
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv)
+{
+ const unsigned int tx_head = rkcanfd_get_tx_head(priv);
+ const u32 reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+}
+
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rkcanfd_priv *priv = netdev_priv(ndev);
+ u32 reg_frameinfo, reg_id, reg_cmd;
+ unsigned int tx_head, frame_len;
+ const struct canfd_frame *cfd;
+ int err;
+ u8 i;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (!netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD)) {
+ if (net_ratelimit())
+ netdev_info(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, tx_pending=%d)\n",
+ priv->tx_head, priv->tx_tail,
+ rkcanfd_get_tx_pending(priv));
+
+ return NETDEV_TX_BUSY;
+ }
+
+ cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ reg_frameinfo = RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_EFF, cfd->can_id);
+ } else {
+ reg_frameinfo = 0;
+ reg_id = FIELD_PREP(RKCANFD_REG_FD_ID_SFF, cfd->can_id);
+ }
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_RTR;
+
+ if (can_is_canfd_skb(skb)) {
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ reg_frameinfo |= RKCANFD_REG_FD_FRAMEINFO_BRS;
+
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ can_fd_len2dlc(cfd->len));
+ } else {
+ reg_frameinfo |= FIELD_PREP(RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH,
+ cfd->len);
+ }
+
+ tx_head = rkcanfd_get_tx_head(priv);
+ reg_cmd = RKCANFD_REG_CMD_TX_REQ(tx_head);
+
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXFRAMEINFO, reg_frameinfo);
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXID, reg_id);
+ for (i = 0; i < cfd->len; i += 4)
+ rkcanfd_write(priv, RKCANFD_REG_FD_TXDATA0 + i,
+ *(u32 *)(cfd->data + i));
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ WRITE_ONCE(priv->tx_head, priv->tx_head + 1);
+
+ rkcanfd_start_xmit_write_cmd(priv, reg_cmd);
+
+ netif_subqueue_maybe_stop(priv->ndev, 0,
+ rkcanfd_get_effective_tx_free(priv),
+ RKCANFD_TX_STOP_THRESHOLD,
+ RKCANFD_TX_START_THRESHOLD);
+
+ return NETDEV_TX_OK;
+}
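+
+/* TX ring accounting (a sketch, assuming the usual head/tail helpers
+ * in rockchip_canfd.h): the xmit path above advances tx_head, the
+ * completion paths (rkcanfd_handle_tx_done_one() below and the RXSTX
+ * filter in rockchip_canfd-rx.c) advance tx_tail, and
+ *
+ *   tx_free = RKCANFD_TXFIFO_DEPTH - (tx_head - tx_tail);
+ *
+ * feeds netif_subqueue_maybe_stop() / netif_subqueue_completed_wake()
+ * to stop and wake the queue.
+ */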
+
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ unsigned int tx_tail;
+ struct sk_buff *skb;
+
+ tx_tail = rkcanfd_get_tx_tail(priv);
+ skb = priv->can.echo_skb[tx_tail];
+
+ /* Manual handling of CAN Bus Error counters. See
+	 * rkcanfd_get_berr_counter_corrected() for detailed
+ * explanation.
+ */
+ if (priv->bec.txerr)
+ priv->bec.txerr--;
+
+ if (skb)
+ rkcanfd_skb_set_timestamp(priv, skb, ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tx_tail, ts,
+ frame_len_p);
+ stats->tx_packets++;
+}
diff --git a/drivers/net/can/rockchip/rockchip_canfd.h b/drivers/net/can/rockchip/rockchip_canfd.h
new file mode 100644
index 000000000000..93131c7d7f54
--- /dev/null
+++ b/drivers/net/can/rockchip/rockchip_canfd.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2023, 2024 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _ROCKCHIP_CANFD_H
+#define _ROCKCHIP_CANFD_H
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/skbuff.h>
+#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/units.h>
+
+#define RKCANFD_REG_MODE 0x000
+#define RKCANFD_REG_MODE_CAN_FD_MODE_ENABLE BIT(15)
+#define RKCANFD_REG_MODE_DPEE BIT(14)
+#define RKCANFD_REG_MODE_BRSD BIT(13)
+#define RKCANFD_REG_MODE_SPACE_RX_MODE BIT(12)
+#define RKCANFD_REG_MODE_AUTO_BUS_ON BIT(11)
+#define RKCANFD_REG_MODE_AUTO_RETX_MODE BIT(10)
+#define RKCANFD_REG_MODE_OVLD_MODE BIT(9)
+#define RKCANFD_REG_MODE_COVER_MODE BIT(8)
+#define RKCANFD_REG_MODE_RXSORT_MODE BIT(7)
+#define RKCANFD_REG_MODE_TXORDER_MODE BIT(6)
+#define RKCANFD_REG_MODE_RXSTX_MODE BIT(5)
+#define RKCANFD_REG_MODE_LBACK_MODE BIT(4)
+#define RKCANFD_REG_MODE_SILENT_MODE BIT(3)
+#define RKCANFD_REG_MODE_SELF_TEST BIT(2)
+#define RKCANFD_REG_MODE_SLEEP_MODE BIT(1)
+#define RKCANFD_REG_MODE_WORK_MODE BIT(0)
+
+#define RKCANFD_REG_CMD 0x004
+#define RKCANFD_REG_CMD_TX1_REQ BIT(1)
+#define RKCANFD_REG_CMD_TX0_REQ BIT(0)
+#define RKCANFD_REG_CMD_TX_REQ(i) (RKCANFD_REG_CMD_TX0_REQ << (i))
+
+#define RKCANFD_REG_STATE 0x008
+#define RKCANFD_REG_STATE_SLEEP_STATE BIT(6)
+#define RKCANFD_REG_STATE_BUS_OFF_STATE BIT(5)
+#define RKCANFD_REG_STATE_ERROR_WARNING_STATE BIT(4)
+#define RKCANFD_REG_STATE_TX_PERIOD BIT(3)
+#define RKCANFD_REG_STATE_RX_PERIOD BIT(2)
+#define RKCANFD_REG_STATE_TX_BUFFER_FULL BIT(1)
+#define RKCANFD_REG_STATE_RX_BUFFER_FULL BIT(0)
+
+#define RKCANFD_REG_INT 0x00c
+#define RKCANFD_REG_INT_WAKEUP_INT BIT(14)
+#define RKCANFD_REG_INT_TXE_FIFO_FULL_INT BIT(13)
+#define RKCANFD_REG_INT_TXE_FIFO_OV_INT BIT(12)
+#define RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT BIT(11)
+#define RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT BIT(10)
+#define RKCANFD_REG_INT_BUS_OFF_INT BIT(9)
+#define RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT BIT(8)
+#define RKCANFD_REG_INT_RX_FIFO_FULL_INT BIT(7)
+#define RKCANFD_REG_INT_ERROR_INT BIT(6)
+#define RKCANFD_REG_INT_TX_ARBIT_FAIL_INT BIT(5)
+#define RKCANFD_REG_INT_PASSIVE_ERROR_INT BIT(4)
+#define RKCANFD_REG_INT_OVERLOAD_INT BIT(3)
+#define RKCANFD_REG_INT_ERROR_WARNING_INT BIT(2)
+#define RKCANFD_REG_INT_TX_FINISH_INT BIT(1)
+#define RKCANFD_REG_INT_RX_FINISH_INT BIT(0)
+
+#define RKCANFD_REG_INT_ALL \
+ (RKCANFD_REG_INT_WAKEUP_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_TXE_FIFO_OV_INT | \
+ RKCANFD_REG_INT_TIMESTAMP_COUNTER_OVERFLOW_INT | \
+ RKCANFD_REG_INT_BUS_OFF_RECOVERY_INT | \
+ RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_RX_FIFO_OVERFLOW_INT | \
+ RKCANFD_REG_INT_RX_FIFO_FULL_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_TX_ARBIT_FAIL_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_OVERLOAD_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT | \
+ RKCANFD_REG_INT_TX_FINISH_INT | \
+ RKCANFD_REG_INT_RX_FINISH_INT)
+
+#define RKCANFD_REG_INT_ALL_ERROR \
+ (RKCANFD_REG_INT_BUS_OFF_INT | \
+ RKCANFD_REG_INT_ERROR_INT | \
+ RKCANFD_REG_INT_PASSIVE_ERROR_INT | \
+ RKCANFD_REG_INT_ERROR_WARNING_INT)
+
+#define RKCANFD_REG_INT_MASK 0x010
+
+#define RKCANFD_REG_DMA_CTL 0x014
+#define RKCANFD_REG_DMA_CTL_DMA_RX_MODE BIT(1)
+#define RKCANFD_REG_DMA_CTL_DMA_TX_MODE BIT(9)
+
+#define RKCANFD_REG_BITTIMING 0x018
+#define RKCANFD_REG_BITTIMING_SAMPLE_MODE BIT(16)
+#define RKCANFD_REG_BITTIMING_SJW GENMASK(15, 14)
+#define RKCANFD_REG_BITTIMING_BRP GENMASK(13, 8)
+#define RKCANFD_REG_BITTIMING_TSEG2 GENMASK(6, 4)
+#define RKCANFD_REG_BITTIMING_TSEG1 GENMASK(3, 0)
+
+#define RKCANFD_REG_ARBITFAIL 0x028
+#define RKCANFD_REG_ARBITFAIL_ARBIT_FAIL_CODE GENMASK(6, 0)
+
+/* Register seems to be cleared on read */
+#define RKCANFD_REG_ERROR_CODE 0x02c
+#define RKCANFD_REG_ERROR_CODE_PHASE BIT(29)
+#define RKCANFD_REG_ERROR_CODE_TYPE GENMASK(28, 26)
+#define RKCANFD_REG_ERROR_CODE_TYPE_BIT 0x0
+#define RKCANFD_REG_ERROR_CODE_TYPE_STUFF 0x1
+#define RKCANFD_REG_ERROR_CODE_TYPE_FORM 0x2
+#define RKCANFD_REG_ERROR_CODE_TYPE_ACK 0x3
+#define RKCANFD_REG_ERROR_CODE_TYPE_CRC 0x4
+#define RKCANFD_REG_ERROR_CODE_DIRECTION_RX BIT(25)
+#define RKCANFD_REG_ERROR_CODE_TX GENMASK(24, 16)
+#define RKCANFD_REG_ERROR_CODE_TX_OVERLOAD BIT(24)
+#define RKCANFD_REG_ERROR_CODE_TX_ERROR BIT(23)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK BIT(22)
+#define RKCANFD_REG_ERROR_CODE_TX_ACK_EOF BIT(21)
+#define RKCANFD_REG_ERROR_CODE_TX_CRC BIT(20)
+#define RKCANFD_REG_ERROR_CODE_TX_STUFF_COUNT BIT(19)
+#define RKCANFD_REG_ERROR_CODE_TX_DATA BIT(18)
+#define RKCANFD_REG_ERROR_CODE_TX_SOF_DLC BIT(17)
+#define RKCANFD_REG_ERROR_CODE_TX_IDLE BIT(16)
+#define RKCANFD_REG_ERROR_CODE_RX GENMASK(15, 0)
+#define RKCANFD_REG_ERROR_CODE_RX_BUF_INT BIT(15)
+#define RKCANFD_REG_ERROR_CODE_RX_SPACE BIT(14)
+#define RKCANFD_REG_ERROR_CODE_RX_EOF BIT(13)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK_LIM BIT(12)
+#define RKCANFD_REG_ERROR_CODE_RX_ACK BIT(11)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC_LIM BIT(10)
+#define RKCANFD_REG_ERROR_CODE_RX_CRC BIT(9)
+#define RKCANFD_REG_ERROR_CODE_RX_STUFF_COUNT BIT(8)
+#define RKCANFD_REG_ERROR_CODE_RX_DATA BIT(7)
+#define RKCANFD_REG_ERROR_CODE_RX_DLC BIT(6)
+#define RKCANFD_REG_ERROR_CODE_RX_BRS_ESI BIT(5)
+#define RKCANFD_REG_ERROR_CODE_RX_RES BIT(4)
+#define RKCANFD_REG_ERROR_CODE_RX_FDF BIT(3)
+#define RKCANFD_REG_ERROR_CODE_RX_ID2_RTR BIT(2)
+#define RKCANFD_REG_ERROR_CODE_RX_SOF_IDE BIT(1)
+#define RKCANFD_REG_ERROR_CODE_RX_IDLE BIT(0)
+
+#define RKCANFD_REG_ERROR_CODE_NOACK \
+ (FIELD_PREP(RKCANFD_REG_ERROR_CODE_TYPE, \
+ RKCANFD_REG_ERROR_CODE_TYPE_ACK) | \
+ RKCANFD_REG_ERROR_CODE_TX_ACK_EOF | \
+ RKCANFD_REG_ERROR_CODE_RX_ACK)
+
+#define RKCANFD_REG_RXERRORCNT 0x034
+#define RKCANFD_REG_RXERRORCNT_RX_ERR_CNT GENMASK(7, 0)
+
+#define RKCANFD_REG_TXERRORCNT 0x038
+#define RKCANFD_REG_TXERRORCNT_TX_ERR_CNT GENMASK(8, 0)
+
+#define RKCANFD_REG_IDCODE 0x03c
+#define RKCANFD_REG_IDCODE_STANDARD_FRAME_ID GENMASK(10, 0)
+#define RKCANFD_REG_IDCODE_EXTENDED_FRAME_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_IDMASK 0x040
+
+#define RKCANFD_REG_TXFRAMEINFO 0x050
+#define RKCANFD_REG_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_TXID 0x054
+#define RKCANFD_REG_TXID_TX_ID GENMASK(28, 0)
+
+#define RKCANFD_REG_TXDATA0 0x058
+#define RKCANFD_REG_TXDATA1 0x05C
+#define RKCANFD_REG_RXFRAMEINFO 0x060
+#define RKCANFD_REG_RXID 0x064
+#define RKCANFD_REG_RXDATA0 0x068
+#define RKCANFD_REG_RXDATA1 0x06c
+
+#define RKCANFD_REG_RTL_VERSION 0x070
+#define RKCANFD_REG_RTL_VERSION_MAJOR GENMASK(7, 4)
+#define RKCANFD_REG_RTL_VERSION_MINOR GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING 0x100
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SAMPLE_MODE BIT(31)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW GENMASK(30, 24)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP GENMASK(23, 16)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2 GENMASK(14, 8)
+#define RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1 GENMASK(7, 0)
+
+#define RKCANFD_REG_FD_DATA_BITTIMING 0x104
+#define RKCANFD_REG_FD_DATA_BITTIMING_SAMPLE_MODE BIT(21)
+#define RKCANFD_REG_FD_DATA_BITTIMING_SJW GENMASK(20, 17)
+#define RKCANFD_REG_FD_DATA_BITTIMING_BRP GENMASK(16, 9)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG2 GENMASK(8, 5)
+#define RKCANFD_REG_FD_DATA_BITTIMING_TSEG1 GENMASK(4, 0)
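
/* A minimal sketch of how the GENMASK() fields above compose into a
 * register value with FIELD_PREP(). The helper name and the usual
 * "programmed value minus one" hardware encoding are assumptions for
 * illustration only and are not taken from this patch.
 */
static inline u32 rkcanfd_nbt_to_reg(const struct can_bittiming *bt)
{
	return FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_SJW,
			  bt->sjw - 1) |
	       FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_BRP,
			  bt->brp - 1) |
	       FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG2,
			  bt->phase_seg2 - 1) |
	       FIELD_PREP(RKCANFD_REG_FD_NOMINAL_BITTIMING_TSEG1,
			  bt->prop_seg + bt->phase_seg1 - 1);
}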
+
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION 0x108
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_OFFSET GENMASK(6, 1)
+#define RKCANFD_REG_TRANSMIT_DELAY_COMPENSATION_TDC_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP_CTRL 0x10c
+/* The datasheet says bits 6:1, which is wrong. */
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE GENMASK(5, 1)
+#define RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE BIT(0)
+
+#define RKCANFD_REG_TIMESTAMP 0x110
+
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL 0x114
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_CNT GENMASK(8, 5)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_WATERMARK GENMASK(4, 1)
+#define RKCANFD_REG_TXEVENT_FIFO_CTRL_TXE_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_RX_FIFO_CTRL 0x118
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_CNT GENMASK(6, 4)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_FULL_WATERMARK GENMASK(3, 1)
+#define RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE BIT(0)
+
+#define RKCANFD_REG_AFC_CTRL 0x11c
+#define RKCANFD_REG_AFC_CTRL_UAF5 BIT(4)
+#define RKCANFD_REG_AFC_CTRL_UAF4 BIT(3)
+#define RKCANFD_REG_AFC_CTRL_UAF3 BIT(2)
+#define RKCANFD_REG_AFC_CTRL_UAF2 BIT(1)
+#define RKCANFD_REG_AFC_CTRL_UAF1 BIT(0)
+
+#define RKCANFD_REG_IDCODE0 0x120
+#define RKCANFD_REG_IDMASK0 0x124
+#define RKCANFD_REG_IDCODE1 0x128
+#define RKCANFD_REG_IDMASK1 0x12c
+#define RKCANFD_REG_IDCODE2 0x130
+#define RKCANFD_REG_IDMASK2 0x134
+#define RKCANFD_REG_IDCODE3 0x138
+#define RKCANFD_REG_IDMASK3 0x13c
+#define RKCANFD_REG_IDCODE4 0x140
+#define RKCANFD_REG_IDMASK4 0x144
+
+#define RKCANFD_REG_FD_TXFRAMEINFO 0x200
+#define RKCANFD_REG_FD_FRAMEINFO_FRAME_FORMAT BIT(7)
+#define RKCANFD_REG_FD_FRAMEINFO_RTR BIT(6)
+#define RKCANFD_REG_FD_FRAMEINFO_FDF BIT(5)
+#define RKCANFD_REG_FD_FRAMEINFO_BRS BIT(4)
+#define RKCANFD_REG_FD_FRAMEINFO_DATA_LENGTH GENMASK(3, 0)
+
+#define RKCANFD_REG_FD_TXID 0x204
+#define RKCANFD_REG_FD_ID_EFF GENMASK(28, 0)
+#define RKCANFD_REG_FD_ID_SFF GENMASK(11, 0)
+
+#define RKCANFD_REG_FD_TXDATA0 0x208
+#define RKCANFD_REG_FD_TXDATA1 0x20c
+#define RKCANFD_REG_FD_TXDATA2 0x210
+#define RKCANFD_REG_FD_TXDATA3 0x214
+#define RKCANFD_REG_FD_TXDATA4 0x218
+#define RKCANFD_REG_FD_TXDATA5 0x21c
+#define RKCANFD_REG_FD_TXDATA6 0x220
+#define RKCANFD_REG_FD_TXDATA7 0x224
+#define RKCANFD_REG_FD_TXDATA8 0x228
+#define RKCANFD_REG_FD_TXDATA9 0x22c
+#define RKCANFD_REG_FD_TXDATA10 0x230
+#define RKCANFD_REG_FD_TXDATA11 0x234
+#define RKCANFD_REG_FD_TXDATA12 0x238
+#define RKCANFD_REG_FD_TXDATA13 0x23c
+#define RKCANFD_REG_FD_TXDATA14 0x240
+#define RKCANFD_REG_FD_TXDATA15 0x244
+
+#define RKCANFD_REG_FD_RXFRAMEINFO 0x300
+#define RKCANFD_REG_FD_RXID 0x304
+#define RKCANFD_REG_FD_RXTIMESTAMP 0x308
+#define RKCANFD_REG_FD_RXDATA0 0x30c
+#define RKCANFD_REG_FD_RXDATA1 0x310
+#define RKCANFD_REG_FD_RXDATA2 0x314
+#define RKCANFD_REG_FD_RXDATA3 0x318
+#define RKCANFD_REG_FD_RXDATA4 0x31c
+#define RKCANFD_REG_FD_RXDATA5 0x320
+#define RKCANFD_REG_FD_RXDATA6 0x324
+#define RKCANFD_REG_FD_RXDATA7 0x328
+#define RKCANFD_REG_FD_RXDATA8 0x32c
+#define RKCANFD_REG_FD_RXDATA9 0x330
+#define RKCANFD_REG_FD_RXDATA10 0x334
+#define RKCANFD_REG_FD_RXDATA11 0x338
+#define RKCANFD_REG_FD_RXDATA12 0x33c
+#define RKCANFD_REG_FD_RXDATA13 0x340
+#define RKCANFD_REG_FD_RXDATA14 0x344
+#define RKCANFD_REG_FD_RXDATA15 0x348
+
+#define RKCANFD_REG_RX_FIFO_RDATA 0x400
+#define RKCANFD_REG_TXE_FIFO_RDATA 0x500
+
+#define DEVICE_NAME "rockchip_canfd"
+#define RKCANFD_NAPI_WEIGHT 32
+#define RKCANFD_TXFIFO_DEPTH 2
+#define RKCANFD_TX_STOP_THRESHOLD 1
+#define RKCANFD_TX_START_THRESHOLD 1
+
+#define RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC 60
+#define RKCANFD_ERRATUM_5_SYSCLOCK_HZ_MIN (300 * MEGA)
+
+/* rk3568 CAN-FD Errata, as of Tue 07 Nov 2023 11:25:31 +08:00 */
+
+/* Erratum 1: The error frame sent by the CAN controller has an
+ * abnormal format.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_1 BIT(0)
+
+/* Erratum 2: The error frame sent after detecting a CRC error has an
+ * abnormal position.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_2 BIT(1)
+
+/* Erratum 3: Intermittent CRC calculation errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_3 BIT(2)
+
+/* Erratum 4: Intermittent occurrence of stuffing errors. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_4 BIT(3)
+
+/* Erratum 5: Counters related to the TXFIFO and RXFIFO exhibit
+ * abnormal counting behavior.
+ *
+ * The rk3568 CAN-FD errata sheet as of Tue 07 Nov 2023 11:25:31 +08:00
+ * states that only the rk3568v2 is affected by this erratum, but
+ * tests with the rk3568v2 and rk3568v3 show that the RX_FIFO_CNT is
+ * sometimes too high. This leads to CAN frames being read from a
+ * FIFO that is already empty.
+ *
+ * Further tests on the rk3568v2 and rk3568v3 show that in this
+ * situation (i.e. empty FIFO) all elements of the FIFO header
+ * (frameinfo, id, ts) contain the same data.
+ *
+ * On the rk3568v2 and rk3568v3, this problem only occurs extremely
+ * rarely with the standard clock of 300 MHz, but almost immediately
+ * at 80 MHz.
+ *
+ * To work around this problem, check for an empty FIFO with
+ * rkcanfd_fifo_header_empty() in rkcanfd_handle_rx_int_one() and exit
+ * early.
+ *
+ * To reproduce:
+ * assigned-clocks = <&cru CLK_CANx>;
+ * assigned-clock-rates = <80000000>;
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_5 BIT(4)
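
/* A minimal sketch of the empty-FIFO check described above. The real
 * rkcanfd_fifo_header_empty() lives in the driver's C file, so its
 * exact shape here is an assumption derived from the erratum text;
 * struct rkcanfd_fifo_header is defined further down in this header.
 */
static inline bool
rkcanfd_fifo_header_empty(const struct rkcanfd_fifo_header *header)
{
	/* Erratum 5: on an empty FIFO, frameinfo, id and ts all read
	 * back with the same data.
	 */
	return header->frameinfo == header->id &&
	       header->frameinfo == header->ts;
}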
+
+/* Erratum 6: The CAN controller's transmission of extended frames may
+ * intermittently change into standard frames.
+ *
+ * Work around this issue by activating self reception (RXSTX). If we
+ * have pending TX CAN frames, check all RX'ed CAN frames in
+ * rkcanfd_rxstx_filter().
+ *
+ * If it's a frame we've sent and it's OK, call the TX complete
+ * handler: rkcanfd_handle_tx_done_one(). Mask the TX complete IRQ.
+ *
+ * If it's a frame we've sent, but the CAN-ID is mangled, resend the
+ * original extended frame.
+ *
+ * To reproduce:
+ * host:
+ * canfdtest -evx -g can0
+ * candump any,0:80000000 -cexdtA
+ * dut:
+ * canfdtest -evx can0
+ * ethtool -S can0
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_6 BIT(5)
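
/* In outline, the rkcanfd_rxstx_filter() flow described above makes a
 * three-way decision. This is only a sketch of how it might look in
 * the driver's C file: the two comparison helpers are assumptions,
 * while rkcanfd_handle_tx_done_one() and rkcanfd_xmit_retry() are
 * declared at the end of this header.
 */
static bool rkcanfd_rxstx_filter_sketch(struct rkcanfd_priv *priv,
					const struct canfd_frame *cf, u32 ts)
{
	unsigned int frame_len;

	if (!rkcanfd_get_tx_pending(priv))
		return false;	/* no echo expected, normal RX frame */

	if (rx_frame_matches_pending_tx(priv, cf)) {	/* assumed helper */
		/* our frame came back intact: complete the TX */
		rkcanfd_handle_tx_done_one(priv, ts, &frame_len);
		return true;
	}

	if (rx_frame_has_mangled_id(priv, cf)) {	/* assumed helper */
		/* Erratum 6 hit: the extended frame went out as a
		 * standard frame, push the original frame to the
		 * hardware again
		 */
		rkcanfd_xmit_retry(priv);
		return true;
	}

	return false;
}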
+
+/* Erratum 7: In the passive error state, the CAN controller's
+ * interframe space segment counting is inaccurate.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_7 BIT(6)
+
+/* Erratum 8: The Format-Error error flag is transmitted one bit
+ * later.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_8 BIT(7)
+
+/* Erratum 9: In the arbitration segment, the CAN controller will
+ * identify stuffing errors as arbitration failures.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_9 BIT(8)
+
+/* Erratum 10: Does not support the BUSOFF slow recovery mechanism. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_10 BIT(9)
+
+/* Erratum 11: Arbitration error. */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_11 BIT(10)
+
+/* Erratum 12: A dominant bit at the third bit of the intermission may
+ * cause a transmission error.
+ */
+#define RKCANFD_QUIRK_RK3568_ERRATUM_12 BIT(11)
+
+/* Tests on the rk3568v2 and rk3568v3 show that receiving certain
+ * CAN-FD frames triggers an Error Interrupt.
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_STUFF_COUNT (0x0a010100) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##01f
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ *
+ * - Form Error in RX Arbitration Phase: TX_IDLE RX_CRC (0x0a010200) CMD=0 RX=0 TX=0
+ * Error-Warning=1 Bus-Off=0
+ * To reproduce:
+ * host:
+ * cansend can0 002##07217010000000000
+ * DUT:
+ * candump any,0:0,#FFFFFFFF -cexdHtA
+ */
+#define RKCANFD_QUIRK_CANFD_BROKEN BIT(12)
+
+/* known issues with rk3568v3:
+ *
+ * - Overload situation during high bus load
+ * To reproduce:
+ * host:
+ * # add a 2nd CAN adapter to the CAN bus
+ * cangen can0 -I 1 -Li -Di -p10 -g 0.3
+ * cansequence -rve
+ * DUT:
+ * cangen can0 -I2 -L1 -Di -p10 -c10 -g 1 -e
+ * cansequence -rv -i 1
+ *
+ * - TX starvation after repeated Bus-Off
+ * To reproduce:
+ * host:
+ * sleep 3 && cangen can0 -I2 -Li -Di -p10 -g 0.0
+ * DUT:
+ * cangen can0 -I2 -Li -Di -p10 -g 0.05
+ */
+
+enum rkcanfd_model {
+ RKCANFD_MODEL_RK3568V2 = 0x35682,
+ RKCANFD_MODEL_RK3568V3 = 0x35683,
+};
+
+struct rkcanfd_devtype_data {
+ enum rkcanfd_model model;
+ u32 quirks;
+};
+
+struct rkcanfd_fifo_header {
+ u32 frameinfo;
+ u32 id;
+ u32 ts;
+};
+
+struct rkcanfd_stats {
+ struct u64_stats_sync syncp;
+
+ /* Erratum 5 */
+ u64_stats_t rx_fifo_empty_errors;
+
+ /* Erratum 6 */
+ u64_stats_t tx_extended_as_standard_errors;
+};
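
/* The counters above follow the usual u64_stats_sync writer pattern; a
 * sketch of how one would be bumped (the helper name and its call site
 * in the RX path are assumptions):
 */
static inline void
rkcanfd_stats_inc_rx_fifo_empty(struct rkcanfd_stats *stats)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_fifo_empty_errors);
	u64_stats_update_end(&stats->syncp);
}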
+
+struct rkcanfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ void __iomem *regs;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ u32 reg_mode_default;
+ u32 reg_int_mask_default;
+ struct rkcanfd_devtype_data devtype_data;
+
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct delayed_work timestamp;
+ unsigned long work_delay_jiffies;
+
+ struct can_berr_counter bec;
+
+ struct rkcanfd_stats stats;
+
+ struct reset_control *reset;
+ struct clk_bulk_data *clks;
+ int clks_num;
+};
+
+static inline u32
+rkcanfd_read(const struct rkcanfd_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void
+rkcanfd_read_rep(const struct rkcanfd_priv *priv, u32 reg,
+ void *buf, unsigned int len)
+{
+ readsl(priv->regs + reg, buf, len / sizeof(u32));
+}
+
+static inline void
+rkcanfd_write(const struct rkcanfd_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+static inline u32
+rkcanfd_get_timestamp(const struct rkcanfd_priv *priv)
+{
+ return rkcanfd_read(priv, RKCANFD_REG_TIMESTAMP);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_head(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_tail(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_tail) & (RKCANFD_TXFIFO_DEPTH - 1);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_pending(const struct rkcanfd_priv *priv)
+{
+ return READ_ONCE(priv->tx_head) - READ_ONCE(priv->tx_tail);
+}
+
+static inline unsigned int
+rkcanfd_get_tx_free(const struct rkcanfd_priv *priv)
+{
+ return RKCANFD_TXFIFO_DEPTH - rkcanfd_get_tx_pending(priv);
+}
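
/* Worked example of the free-running counter scheme above, with
 * RKCANFD_TXFIFO_DEPTH == 2 and assumed values tx_head == 5,
 * tx_tail == 4:
 *
 *   rkcanfd_get_tx_head()    -> 5 & 1 == 1   (hardware mailbox index)
 *   rkcanfd_get_tx_tail()    -> 4 & 1 == 0
 *   rkcanfd_get_tx_pending() -> 5 - 4 == 1   (one frame in flight)
 *   rkcanfd_get_tx_free()    -> 2 - 1 == 1
 *
 * Head and tail are never masked when incremented, so the subtraction
 * stays correct modulo 2^32 even after the counters wrap.
 */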
+
+void rkcanfd_ethtool_init(struct rkcanfd_priv *priv);
+
+int rkcanfd_handle_rx_int(struct rkcanfd_priv *priv);
+
+void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
+ struct sk_buff *skb, const u32 timestamp);
+void rkcanfd_timestamp_init(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_start(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv);
+void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv);
+
+unsigned int rkcanfd_get_effective_tx_free(const struct rkcanfd_priv *priv);
+void rkcanfd_xmit_retry(struct rkcanfd_priv *priv);
+netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void rkcanfd_handle_tx_done_one(struct rkcanfd_priv *priv, const u32 ts,
+ unsigned int *frame_len_p);
+
+#endif
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..e061e35769bf 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,6 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
+ depends on HAS_IOPORT
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
@@ -104,10 +105,10 @@ config CAN_SJA1000_PLATFORM
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
- depends on ISA
+ depends on (ISA && PC104) || (COMPILE_TEST && HAS_IOPORT)
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 4ab91759a5c6..5bca719d61f5 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -3,6 +3,7 @@
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ * Copyright (C) 2023 EMS Dr. Thomas Wuensche
*/
#include <linux/kernel.h>
@@ -19,12 +20,14 @@
#define DRV_NAME "ems_pci"
-MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>");
+MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_V3_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
@@ -40,8 +43,7 @@ struct ems_pci_card {
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
-/*
- * Register definitions and descriptions are from LinCAN 0.3.3.
+/* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
@@ -52,8 +54,7 @@ struct ems_pci_card {
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
-/*
- * Register definitions for the PLX 9030
+/* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
@@ -62,8 +63,16 @@ struct ems_pci_card {
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
-/*
- * The board configuration is probably following:
+/* Register definitions for the ASIX99100
+ */
+#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */
+#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */
+
+#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */
+#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */
+#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */
+
+/* The board configuration is probably the following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
@@ -72,23 +81,35 @@ struct ems_pci_card {
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
-/*
- * In the CDR register, you should set CBP to 1.
+/* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
 * is driven by the first one's CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
-#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
-#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
-#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
-#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_V1_BASE_BAR 1
+#define EMS_PCI_V1_CONF_BAR 0
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
+#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V2_BASE_BAR 2
+#define EMS_PCI_V2_CONF_BAR 0
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
+#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V3_BASE_BAR 0
+#define EMS_PCI_V3_CONF_BAR 5
+#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */
+#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers start */
+#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+#define PCI_SUBDEVICE_ID_EMS 0x4010
+
static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -96,12 +117,13 @@ static const struct pci_device_id ems_pci_tbl[] = {
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+ /* CPC-PCIe v3 */
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100_LB, 0xa000, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
-/*
- * Helper to read internal registers from card logic (not CAN)
+/* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
@@ -121,7 +143,7 @@ static void ems_pci_v1_write_reg(const struct sja1000_priv *priv,
static void ems_pci_v1_post_irq(const struct sja1000_priv *priv)
{
- struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+ struct ems_pci_card *card = priv->priv;
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
@@ -141,13 +163,30 @@ static void ems_pci_v2_write_reg(const struct sja1000_priv *priv,
static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
{
- struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+ struct ems_pci_card *card = priv->priv;
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
-/*
- * Check if a CAN controller is present at the specified location
+static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = priv->priv;
+
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+}
+
+/* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
@@ -185,10 +224,10 @@ static void ems_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- if (card->base_addr != NULL)
+ if (card->base_addr)
pci_iounmap(card->pci_dev, card->base_addr);
- if (card->conf_addr != NULL)
+ if (card->conf_addr)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
@@ -202,8 +241,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
writeb(0, card->base_addr);
}
-/*
- * Probe PCI device for EMS CAN signature and register each available
+/* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
@@ -212,7 +250,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, conf_size, base_bar;
+ int max_chan, conf_size, base_bar, conf_bar;
int err, i;
/* Enabling PCI device */
@@ -222,8 +260,8 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
/* Allocating card structures to hold addresses, ... */
- card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
- if (card == NULL) {
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
pci_disable_device(pdev);
return -ENOMEM;
}
@@ -234,27 +272,35 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels = 0;
- if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (pdev->vendor == PCI_VENDOR_ID_ASIX) {
+ card->version = 3; /* CPC-PCIe v3 */
+ max_chan = EMS_PCI_V3_MAX_CHAN;
+ base_bar = EMS_PCI_V3_BASE_BAR;
+ conf_bar = EMS_PCI_V3_CONF_BAR;
+ conf_size = EMS_PCI_V3_CONF_SIZE;
+ } else if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
+ conf_bar = EMS_PCI_V2_CONF_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
+ conf_bar = EMS_PCI_V1_CONF_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, conf_size);
- if (card->conf_addr == NULL) {
+ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size);
+ if (!card->conf_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
- if (card->base_addr == NULL) {
+ if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -276,12 +322,20 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
}
+ if (card->version == 3) {
+ /* The ASIX chip holds the CAN controllers in local reset from
+ * bootup until it is explicitly deasserted
+ */
+ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST,
+ card->conf_addr + ASIX_LIEMR);
+ }
+
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
- if (dev == NULL) {
+ if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -292,16 +346,25 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
- + (i * EMS_PCI_CAN_CTRL_SIZE);
+
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
- } else {
+ priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V1_CAN_CTRL_SIZE);
+ } else if (card->version == 2) {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V2_CAN_CTRL_SIZE);
+ } else {
+ priv->read_reg = ems_pci_v3_read_reg;
+ priv->write_reg = ems_pci_v3_write_reg;
+ priv->post_irq = ems_pci_v3_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V3_CAN_CTRL_SIZE);
}
/* Check if channel is present */
@@ -313,20 +376,28 @@ static int ems_pci_add_card(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->dev_id = i;
- if (card->version == 1)
+ if (card->version == 1) {
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
- else
+ } else if (card->version == 2) {
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
+ } else {
+ /* Enable IRQ in AX99100 */
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+ /* Enable the local INT0 input */
+ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN,
+ card->conf_addr + ASIX_LIEMR);
+ }
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
- dev_err(&pdev->dev, "Registering device failed "
- "(err=%d)\n", err);
+ dev_err(&pdev->dev,
+ "Registering device failed: %pe\n",
+ ERR_PTR(err));
free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -334,7 +405,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
- i + 1, priv->reg_base, dev->irq);
+ i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index e21b169c14c0..4642b6d4aaf7 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
free_sja1000dev(dev);
}
- err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 84f34020aafb..10d88cbda465 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -22,7 +22,7 @@
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_LICENSE("GPL v2");
@@ -462,7 +462,7 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
card->led_chip.owner = THIS_MODULE;
card->led_chip.dev.parent = &pdev->dev;
card->led_chip.algo_data = &card->i2c_bit;
- strncpy(card->led_chip.name, "peak_i2c",
+ strscpy(card->led_chip.name, "peak_i2c",
sizeof(card->led_chip.name));
card->i2c_bit = peak_pciec_i2c_bit_ops;
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..e1610b527d13 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
- *
* CAN driver for PEAK-System PCAN-PC Card
* Derived from the PCAN project file driver/src/pcan_pccard.c
- * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,7 +19,7 @@
#include <linux/can/dev.h>
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
MODULE_LICENSE("GPL v2");
@@ -167,7 +167,7 @@ static void pcan_start_led_timer(struct pcan_pccard *card)
*/
static void pcan_stop_led_timer(struct pcan_pccard *card)
{
- del_timer_sync(&card->led_timer);
+ timer_delete_sync(&card->led_timer);
}
/*
@@ -374,7 +374,7 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
*/
static void pcan_led_timer(struct timer_list *t)
{
- struct pcan_pccard *card = from_timer(card, t, led_timer);
+ struct pcan_pccard *card = timer_container_of(card, t, led_timer);
struct net_device *netdev;
int i, up_count = 0;
u8 ccr;
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 5de1ebb0c6f0..67e5316c6372 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -122,7 +122,6 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
-#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
#define MOXA_PCI_VENDOR_ID 0x1393
@@ -358,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = {
{
/* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
- CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001,
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 3fad54646746..a8fa0d6516b9 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -52,6 +52,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -60,7 +61,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "sja1000.h"
@@ -184,8 +184,9 @@ static void chipset_init(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- /* set clock divider and output control register */
- priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG))
+ /* set clock divider and output control register */
+ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
/* set acceptance filter (accept all) */
priv->write_reg(priv, SJA1000_ACCC0, 0x00);
@@ -205,12 +206,13 @@ static void sja1000_start(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- /* leave reset mode */
+ /* enter reset mode */
if (priv->can.state != CAN_STATE_STOPPED)
set_reset_mode(dev);
/* Initialize chip if uninitialized at this stage */
- if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG ||
+ priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
chipset_init(dev);
/* Clear error counters and error code capture */
@@ -289,7 +291,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
u8 cmd_reg_val = 0x00;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -372,18 +374,33 @@ static void sja1000_rx(struct net_device *dev)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
cf->can_id = id;
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
+}
+
+static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+
+ netdev_dbg(dev, "performing a soft reset upon overrun\n");
+
+ netif_tx_lock(dev);
+
+ can_free_echo_skb(dev, 0, NULL);
+ sja1000_set_mode(dev, CAN_MODE_START);
+
+ netif_tx_unlock(dev);
- can_led_event(dev, CAN_LED_EVENT_RX);
+ return IRQ_HANDLED;
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -396,25 +413,33 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
enum can_state rx_state, tx_state;
unsigned int rxerr, txerr;
uint8_t ecc, alc;
+ int ret = 0;
skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
stats->rx_over_errors++;
stats->rx_errors++;
sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
+
+ /* Some controllers need additional handling upon an overrun
+ * condition: the controller may sometimes be totally confused
+ * and refuse any new frame while its buffer is empty. The only
+ * way to re-sync the read vs. write buffer offsets is to
+ * stop any current handling and perform a reset.
+ */
+ if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN)
+ ret = IRQ_WAKE_THREAD;
}
if (isrc & IRQ_EI) {
@@ -428,36 +453,46 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF && skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
ecc = priv->read_reg(priv, SJA1000_ECC);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
- /* set error type */
- switch (ecc & ECC_MASK) {
- case ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- break;
- }
+ /* set error type */
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
- /* set error location */
- cf->data[3] = ecc & ECC_SEG;
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+ }
/* Error occurred during transmission? */
- if ((ecc & ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
}
if (isrc & IRQ_EPI) {
/* error passive interrupt */
@@ -473,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
netdev_dbg(dev, "arbitration lost interrupt\n");
alc = priv->read_reg(priv, SJA1000_ALC);
priv->can.can_stats.arbitration_lost++;
- cf->can_id |= CAN_ERR_LOSTARB;
- cf->data[0] = alc & 0x1f;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = alc & 0x1f;
+ }
}
if (state != priv->can.state) {
@@ -487,11 +524,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!skb)
+ return -ENOMEM;
+
netif_rx(skb);
- return 0;
+ return ret;
}
irqreturn_t sja1000_interrupt(int irq, void *dev_id)
@@ -500,7 +538,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
struct sja1000_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
uint8_t isrc, status;
- int n = 0;
+ irqreturn_t ret = 0;
+ int n = 0, err;
if (priv->pre_irq)
priv->pre_irq(priv);
@@ -509,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;
- while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
- (n < SJA1000_MAX_IRQ)) {
+ while ((n < SJA1000_MAX_IRQ) &&
+ (isrc = priv->read_reg(priv, SJA1000_IR))) {
status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */
@@ -528,13 +567,10 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
can_free_echo_skb(dev, 0, NULL);
} else {
/* transmission complete */
- stats->tx_bytes +=
- priv->read_reg(priv, SJA1000_FI) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -548,19 +584,25 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
}
if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
/* error interrupt */
- if (sja1000_err(dev, isrc, status))
+ err = sja1000_err(dev, isrc, status);
+ if (err == IRQ_WAKE_THREAD)
+ ret = err;
+ if (err)
break;
}
n++;
}
out:
+ if (!ret)
+ ret = (n) ? IRQ_HANDLED : IRQ_NONE;
+
if (priv->post_irq)
priv->post_irq(priv);
if (n >= SJA1000_MAX_IRQ)
netdev_dbg(dev, "%d messages handled in ISR", n);
- return (n) ? IRQ_HANDLED : IRQ_NONE;
+ return ret;
}
EXPORT_SYMBOL_GPL(sja1000_interrupt);
@@ -579,8 +621,9 @@ static int sja1000_open(struct net_device *dev)
/* register interrupt handler, if not done by the device driver */
if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
- err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
- dev->name, (void *)dev);
+ err = request_threaded_irq(dev->irq, sja1000_interrupt,
+ sja1000_reset_interrupt,
+ priv->irq_flags, dev->name, (void *)dev);
if (err) {
close_candev(dev);
return -EAGAIN;
@@ -590,8 +633,6 @@ static int sja1000_open(struct net_device *dev)
/* init and start chip */
sja1000_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -609,8 +650,6 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -658,28 +697,25 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_open = sja1000_open,
.ndo_stop = sja1000_close,
.ndo_start_xmit = sja1000_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops sja1000_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &sja1000_netdev_ops;
+ dev->ethtool_ops = &sja1000_ethtool_ops;
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- if (!ret)
- devm_can_led_init(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 9d46398f8154..f015e39e2224 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -145,7 +145,9 @@
/*
* Flags for sja1000priv.flags
*/
-#define SJA1000_CUSTOM_IRQ_HANDLER 0x1
+#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0)
+#define SJA1000_QUIRK_NO_CDR_REG BIT(1)
+#define SJA1000_QUIRK_RESET_ON_OVERRUN BIT(2)
/*
* SJA1000 private data structure
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index d513fac50718..2d1f715459d7 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -202,26 +202,28 @@ static int sja1000_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_sja1000dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
-static int sja1000_isa_remove(struct platform_device *pdev)
+static void sja1000_isa_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sja1000_priv *priv = netdev_priv(dev);
@@ -239,8 +241,6 @@ static int sja1000_isa_remove(struct platform_device *pdev)
release_region(port[idx], SJA1000_IOSIZE);
}
free_sja1000dev(dev);
-
- return 0;
}
static struct platform_driver sja1000_isa_driver = {
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d7c2ec529b8f..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,10 +14,9 @@
#include <linux/irq.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include "sja1000.h"
@@ -32,7 +31,7 @@ MODULE_LICENSE("GPL v2");
struct sja1000_of_data {
size_t priv_sz;
- int (*init)(struct sja1000_priv *priv, struct device_node *of);
+ void (*init)(struct sja1000_priv *priv, struct device_node *of);
};
struct technologic_priv {
@@ -95,15 +94,18 @@ static void sp_technologic_write_reg16(const struct sja1000_priv *priv,
spin_unlock_irqrestore(&tp->io_lock, flags);
}
-static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
+static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
{
struct technologic_priv *tp = priv->priv;
priv->read_reg = sp_technologic_read_reg16;
priv->write_reg = sp_technologic_write_reg16;
spin_lock_init(&tp->io_lock);
+}
- return 0;
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG | SJA1000_QUIRK_RESET_ON_OVERRUN;
}
static void sp_populate(struct sja1000_priv *priv,
@@ -156,11 +158,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -195,8 +199,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -211,9 +220,9 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq = NULL;
struct sja1000_platform_data *pdata;
struct device_node *of = pdev->dev.of_node;
- const struct of_device_id *of_id;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -221,32 +230,28 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
-
- if (of)
- irq = irq_of_parse_and_map(of, 0);
- else
+ if (of) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
+ } else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ return -ENODEV;
+ }
- if (!irq && !res_irq)
- return -ENODEV;
-
- of_id = of_match_device(sp_of_table, &pdev->dev);
- if (of_id && of_id->data) {
- of_data = of_id->data;
+ of_data = device_get_match_data(&pdev->dev);
+ if (of_data)
priv_sz = of_data->priv_sz;
- }
dev = alloc_sja1000dev(priv_sz);
if (!dev)
@@ -262,17 +267,26 @@ static int sp_probe(struct platform_device *pdev)
priv->irq_flags = IRQF_SHARED;
}
+ if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN)
+ priv->irq_flags |= IRQF_ONESHOT;
+
dev->irq = irq;
priv->reg_base = addr;
if (of) {
- sp_populate_of(priv, of);
-
- if (of_data && of_data->init) {
- err = of_data->init(priv, of);
- if (err)
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
goto exit_free;
+ }
}
+
+ sp_populate_of(priv, of);
+
+ if (of_data && of_data->init)
+ of_data->init(priv, of);
} else {
sp_populate(priv, pdata, res_mem->flags);
}
@@ -296,14 +310,12 @@ static int sp_probe(struct platform_device *pdev)
return err;
}
-static int sp_remove(struct platform_device *pdev)
+static void sp_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
-
- return 0;
}
static struct platform_driver sp_driver = {
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
deleted file mode 100644
index d42ec7d1bc14..000000000000
--- a/drivers/net/can/slcan.c
+++ /dev/null
@@ -1,791 +0,0 @@
-/*
- * slcan.c - serial line CAN interface driver (using tty line discipline)
- *
- * This file is derived from linux/drivers/net/slip/slip.c
- *
- * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
- * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
- * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see http://www.gnu.org/licenses/gpl.html
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/skb.h>
-#include <linux/can/can-ml.h>
-
-MODULE_ALIAS_LDISC(N_SLCAN);
-MODULE_DESCRIPTION("serial line CAN interface");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
-
-#define SLCAN_MAGIC 0x53CA
-
-static int maxdev = 10; /* MAX number of SLCAN channels;
- This can be overridden with
- insmod slcan.ko maxdev=nnn */
-module_param(maxdev, int, 0);
-MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
-
-/* maximum rx buffer len: extended CAN frame with timestamp */
-#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
-
-#define SLC_CMD_LEN 1
-#define SLC_SFF_ID_LEN 3
-#define SLC_EFF_ID_LEN 8
-
-struct slcan {
- int magic;
-
- /* Various fields. */
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
- spinlock_t lock;
- struct work_struct tx_work; /* Flushes transmit buffer */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char rbuff[SLC_MTU]; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next XMIT byte */
- int xleft; /* bytes left in XMIT queue */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ERROR 1 /* Parity, etc. error */
-};
-
-static struct net_device **slcan_devs;
-
- /************************************************************************
- * SLCAN ENCAPSULATION FORMAT *
- ************************************************************************/
-
-/*
- * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
- * frame format) a data length code (len) which can be from 0 to 8
- * and up to <len> data bytes as payload.
- * Additionally a CAN frame may become a remote transmission frame if the
- * RTR-bit is set. This causes another ECU to send a CAN frame with the
- * given can_id.
- *
- * The SLCAN ASCII representation of these different frame types is:
- * <type> <id> <dlc> <data>*
- *
- * Extended frames (29 bit) are defined by capital characters in the type.
- * RTR frames are defined as 'r' types - normal frames have 't' type:
- * t => 11 bit data frame
- * r => 11 bit RTR frame
- * T => 29 bit data frame
- * R => 29 bit RTR frame
- *
- * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64).
- * The <dlc> is a one byte ASCII number ('0' - '8')
- * The <data> section has at much ASCII Hex bytes as defined by the <dlc>
- *
- * Examples:
- *
- * t1230 : can_id 0x123, len 0, no data
- * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
- * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
- * r1230 : can_id 0x123, len 0, no data, remote transmission request
- *
- */
-
- /************************************************************************
- * STANDARD SLCAN DECAPSULATION *
- ************************************************************************/
-
-/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump(struct slcan *sl)
-{
- struct sk_buff *skb;
- struct can_frame cf;
- int i, tmp;
- u32 tmpid;
- char *cmd = sl->rbuff;
-
- memset(&cf, 0, sizeof(cf));
-
- switch (*cmd) {
- case 'r':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 't':
- /* store dlc ASCII value and terminate SFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
- break;
- case 'R':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 'T':
- cf.can_id |= CAN_EFF_FLAG;
- /* store dlc ASCII value and terminate EFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
- break;
- default:
- return;
- }
-
- if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
- return;
-
- cf.can_id |= tmpid;
-
- /* get len from sanitized ASCII value */
- if (cf.len >= '0' && cf.len < '9')
- cf.len -= '0';
- else
- return;
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.len; i++) {
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] = (tmp << 4);
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] |= tmp;
- }
- }
-
- skb = dev_alloc_skb(sizeof(struct can_frame) +
- sizeof(struct can_skb_priv));
- if (!skb)
- return;
-
- skb->dev = sl->dev;
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = sl->dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb_put_data(skb, &cf, sizeof(struct can_frame));
-
- sl->dev->stats.rx_packets++;
- sl->dev->stats.rx_bytes += cf.len;
- netif_rx_ni(skb);
-}
-
-/* parse tty input stream */
-static void slcan_unesc(struct slcan *sl, unsigned char s)
-{
- if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- (sl->rcount > 4)) {
- slc_bump(sl);
- }
- sl->rcount = 0;
- } else {
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < SLC_MTU) {
- sl->rbuff[sl->rcount++] = s;
- return;
- } else {
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
- }
-}
-
- /************************************************************************
- * STANDARD SLCAN ENCAPSULATION *
- ************************************************************************/
-
-/* Encapsulate one can_frame and stuff into a TTY queue. */
-static void slc_encaps(struct slcan *sl, struct can_frame *cf)
-{
- int actual, i;
- unsigned char *pos;
- unsigned char *endpos;
- canid_t id = cf->can_id;
-
- pos = sl->xbuff;
-
- if (cf->can_id & CAN_RTR_FLAG)
- *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
- else
- *pos = 'T'; /* becomes 't' in standard frame format (SSF) */
-
- /* determine number of chars for the CAN-identifier */
- if (cf->can_id & CAN_EFF_FLAG) {
- id &= CAN_EFF_MASK;
- endpos = pos + SLC_EFF_ID_LEN;
- } else {
- *pos |= 0x20; /* convert R/T to lower case for SFF */
- id &= CAN_SFF_MASK;
- endpos = pos + SLC_SFF_ID_LEN;
- }
-
- /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
- pos++;
- while (endpos >= pos) {
- *endpos-- = hex_asc_upper[id & 0xf];
- id >>= 4;
- }
-
- pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
-
- *pos++ = cf->len + '0';
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf->len; i++)
- pos = hex_byte_pack_upper(pos, cf->data[i]);
- }
-
- *pos++ = '\r';
-
- /* Order of next two lines is *very* important.
- * When we are sending a little amount of data,
- * the transfer may be completed inside the ops->write()
- * routine, because it's running with interrupts enabled.
- * In this case we *never* got WRITE_WAKEUP event,
- * if we did not request it before write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
- sl->xleft = (pos - sl->xbuff) - actual;
- sl->xhead = sl->xbuff + actual;
- sl->dev->stats.tx_bytes += cf->len;
-}
-
-/* Write out any remaining transmit buffer. Scheduled when tty is writable */
-static void slcan_transmit(struct work_struct *work)
-{
- struct slcan *sl = container_of(work, struct slcan, tx_work);
- int actual;
-
- spin_lock_bh(&sl->lock);
- /* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
- spin_unlock_bh(&sl->lock);
- return;
- }
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- spin_unlock_bh(&sl->lock);
- netif_wake_queue(sl->dev);
- return;
- }
-
- actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
- spin_unlock_bh(&sl->lock);
-}
-
-/*
- * Called by the driver when there's room for more data.
- * Schedule the transmit.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
-{
- struct slcan *sl;
-
- rcu_read_lock();
- sl = rcu_dereference(tty->disc_data);
- if (sl)
- schedule_work(&sl->tx_work);
- rcu_read_unlock();
-}
-
-/* Send a can_frame to a TTY queue. */
-static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (skb->len != CAN_MTU)
- goto out;
-
- spin_lock(&sl->lock);
- if (!netif_running(dev)) {
- spin_unlock(&sl->lock);
- printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
- goto out;
- }
- if (sl->tty == NULL) {
- spin_unlock(&sl->lock);
- goto out;
- }
-
- netif_stop_queue(sl->dev);
- slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
- spin_unlock(&sl->lock);
-
-out:
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/******************************************
- * Routines looking at netdevice side.
- ******************************************/
-
-/* Netdevice UP -> DOWN routine */
-static int slc_close(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- /* TTY discipline is running. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- }
- netif_stop_queue(dev);
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock_bh(&sl->lock);
-
- return 0;
-}
-
-/* Netdevice DOWN -> UP routine */
-static int slc_open(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- sl->flags &= (1 << SLF_INUSE);
- netif_start_queue(dev);
- return 0;
-}
-
-/* Hook the destructor so we can free slcan devs at the right point in time */
-static void slc_free_netdev(struct net_device *dev)
-{
- int i = dev->base_addr;
-
- slcan_devs[i] = NULL;
-}
-
-static int slcan_change_mtu(struct net_device *dev, int new_mtu)
-{
- return -EINVAL;
-}
-
-static const struct net_device_ops slc_netdev_ops = {
- .ndo_open = slc_open,
- .ndo_stop = slc_close,
- .ndo_start_xmit = slc_xmit,
- .ndo_change_mtu = slcan_change_mtu,
-};
-
-static void slc_setup(struct net_device *dev)
-{
- dev->netdev_ops = &slc_netdev_ops;
- dev->needs_free_netdev = true;
- dev->priv_destructor = slc_free_netdev;
-
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- dev->mtu = CAN_MTU;
- dev->type = ARPHRD_CAN;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-/******************************************
- Routines looking at TTY side.
- ******************************************/
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of SLCAN data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing. This will not
- * be re-entered while running but other ldisc functions may be called
- * in parallel
- */
-
-static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int count)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return;
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- slcan_unesc(sl, *cp++);
- }
-}
-
-/************************************
- * slcan_open helper routines.
- ************************************/
-
-/* Collect hanged up channels */
-static void slc_sync(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- continue;
- if (dev->flags & IFF_UP)
- dev_close(dev);
- }
-}
-
-/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(void)
-{
- int i;
- char name[IFNAMSIZ];
- struct net_device *dev = NULL;
- struct can_ml_priv *can_ml;
- struct slcan *sl;
- int size;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- }
-
- /* Sorry, too many, all slots in use */
- if (i >= maxdev)
- return NULL;
-
- sprintf(name, "slcan%d", i);
- size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
- dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
- if (!dev)
- return NULL;
-
- dev->base_addr = i;
- sl = netdev_priv(dev);
- can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
- can_set_ml_priv(dev, can_ml);
-
- /* Initialize channel control data */
- sl->magic = SLCAN_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- INIT_WORK(&sl->tx_work, slcan_transmit);
- slcan_devs[i] = dev;
-
- return sl;
-}
-
-/*
- * Open the high-level part of the SLCAN channel.
- * This function is called by the TTY module when the
- * SLCAN line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free SLCAN channel...
- *
- * Called in process context serialized from other ldisc calls.
- */
-
-static int slcan_open(struct tty_struct *tty)
-{
- struct slcan *sl;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* RTnetlink lock is misused here to serialize concurrent
- opens of slcan channels. There are better ways, but it is
- the simplest one.
- */
- rtnl_lock();
-
- /* Collect hanged up channels. */
- slc_sync();
-
- sl = tty->disc_data;
-
- err = -EEXIST;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == SLCAN_MAGIC)
- goto err_exit;
-
- /* OK. Find a free SLCAN channel to use. */
- err = -ENFILE;
- sl = slc_alloc();
- if (sl == NULL)
- goto err_exit;
-
- sl->tty = tty;
- tty->disc_data = sl;
-
- if (!test_bit(SLF_INUSE, &sl->flags)) {
- /* Perform the low-level SLCAN initialization. */
- sl->rcount = 0;
- sl->xleft = 0;
-
- set_bit(SLF_INUSE, &sl->flags);
-
- err = register_netdevice(sl->dev);
- if (err)
- goto err_free_chan;
- }
-
- /* Done. We have linked the TTY line to a channel. */
- rtnl_unlock();
- tty->receive_room = 65536; /* We don't flow control */
-
- /* TTY layer expects 0 on success */
- return 0;
-
-err_free_chan:
- sl->tty = NULL;
- tty->disc_data = NULL;
- clear_bit(SLF_INUSE, &sl->flags);
- slc_free_netdev(sl->dev);
- /* do not call free_netdev before rtnl_unlock */
- rtnl_unlock();
- free_netdev(sl->dev);
- return err;
-
-err_exit:
- rtnl_unlock();
-
- /* Count references from TTY module */
- return err;
-}
-
-/*
- * Close down a SLCAN channel.
- * This means flushing out any pending queues, and then returning. This
- * call is serialized against other ldisc functions.
- *
- * We also use this method for a hangup event.
- */
-
-static void slcan_close(struct tty_struct *tty)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
- return;
-
- spin_lock_bh(&sl->lock);
- rcu_assign_pointer(tty->disc_data, NULL);
- sl->tty = NULL;
- spin_unlock_bh(&sl->lock);
-
- synchronize_rcu();
- flush_work(&sl->tx_work);
-
- /* Flush network side */
- unregister_netdev(sl->dev);
- /* This will complete via sl_free_netdev */
-}
-
-static int slcan_hangup(struct tty_struct *tty)
-{
- slcan_close(tty);
- return 0;
-}
-
-/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
- unsigned int tmp;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- tmp = strlen(sl->dev->name) + 1;
- if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
- return -EFAULT;
- return 0;
-
- case SIOCSIFHWADDR:
- return -EINVAL;
-
- default:
- return tty_mode_ioctl(tty, file, cmd, arg);
- }
-}
-
-static struct tty_ldisc_ops slc_ldisc = {
- .owner = THIS_MODULE,
- .num = N_SLCAN,
- .name = "slcan",
- .open = slcan_open,
- .close = slcan_close,
- .hangup = slcan_hangup,
- .ioctl = slcan_ioctl,
- .receive_buf = slcan_receive_buf,
- .write_wakeup = slcan_write_wakeup,
-};
-
-static int __init slcan_init(void)
-{
- int status;
-
- if (maxdev < 4)
- maxdev = 4; /* Sanity */
-
- pr_info("slcan: serial line CAN interface driver\n");
- pr_info("slcan: %d dynamic interface channels.\n", maxdev);
-
- slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
- if (!slcan_devs)
- return -ENOMEM;
-
- /* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(&slc_ldisc);
- if (status) {
- printk(KERN_ERR "slcan: can't register line discipline\n");
- kfree(slcan_devs);
- }
- return status;
-}
-
-static void __exit slcan_exit(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
- unsigned long timeout = jiffies + HZ;
- int busy = 0;
-
- if (slcan_devs == NULL)
- return;
-
- /* First of all: check for active disciplines and hangup them.
- */
- do {
- if (busy)
- msleep_interruptible(100);
-
- busy = 0;
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- sl = netdev_priv(dev);
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- busy++;
- tty_hangup(sl->tty);
- }
- spin_unlock_bh(&sl->lock);
- }
- } while (busy && time_before(jiffies, timeout));
-
- /* FIXME: hangup is async so we should wait when doing this second
- phase */
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- slcan_devs[i] = NULL;
-
- sl = netdev_priv(dev);
- if (sl->tty) {
- printk(KERN_ERR "%s: tty discipline still running\n",
- dev->name);
- }
-
- unregister_netdev(dev);
- }
-
- kfree(slcan_devs);
- slcan_devs = NULL;
-
- tty_unregister_ldisc(&slc_ldisc);
-}
-
-module_init(slcan_init);
-module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile
new file mode 100644
index 000000000000..8a88e484ee21
--- /dev/null
+++ b/drivers/net/can/slcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+slcan-objs :=
+slcan-objs += slcan-core.o
+slcan-objs += slcan-ethtool.o
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
new file mode 100644
index 000000000000..cd789e178d34
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -0,0 +1,953 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip/slip.c and got
+ * inspiration from linux/drivers/net/can/can327.c for the rework made
+ * on the line discipline code.
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+#include "slcan.h"
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
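+/* i.e. 'T' + 8 id chars + 1 dlc char + 16 data chars + 4 timestamp chars
+ * + '\r' = 31 chars, plus the NUL counted by sizeof() and one extra byte
+ */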
+
+#define SLCAN_CMD_LEN 1
+#define SLCAN_SFF_ID_LEN 3
+#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
+#define SLCAN_STATE_LEN 1
+#define SLCAN_STATE_BE_RXCNT_LEN 3
+#define SLCAN_STATE_BE_TXCNT_LEN 3
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+struct slcan {
+ struct can_priv can;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ /* These are the frame buffers. */
+ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer*/
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_ERROR 0 /* Parity, etc. error */
+#define SLF_XCMD 1 /* Command transmission */
+ unsigned long cmd_flags; /* Command flags */
+#define CF_ERR_RST 0 /* Reset errors on open */
+ wait_queue_head_t xcmd_wait; /* Wait queue for command transmission */
+};
+
+static const u32 slcan_bitrate_const[] = {
+ 10000, 20000, 50000, 100000, 125000,
+ 250000, 500000, 800000, 1000000
+};
+
+bool slcan_err_rst_on_open(struct net_device *ndev)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ return !!test_bit(CF_ERR_RST, &sl->cmd_flags);
+}
+
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (on)
+ set_bit(CF_ERR_RST, &sl->cmd_flags);
+ else
+ clear_bit(CF_ERR_RST, &sl->cmd_flags);
+
+ return 0;
+}
+
+/*************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ *************************************************************************/
+
+/* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (len) which can be from 0 to 8,
+ * and up to <len> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8').
+ * The <data> section has as many ASCII Hex bytes (two hex chars each)
+ * as defined by the <dlc>.
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, len 0, no data
+ * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, len 0, no data, remote transmission request
+ *
+ */
+
+/*************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ *************************************************************************/
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slcan_bump_frame(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int i, tmp;
+ u32 tmpid;
+ char *cmd = sl->rbuff;
+
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
+ skb = alloc_can_skb(sl->dev, &cf);
+ if (unlikely(!skb)) {
+ sl->dev->stats.rx_dropped++;
+ return;
+ }
+
+ switch (*cmd) {
+ case 'r':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 't':
+ /* store dlc ASCII value and terminate SFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1;
+ break;
+ case 'R':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 'T':
+ cf->can_id |= CAN_EFF_FLAG;
+ /* store dlc ASCII value and terminate EFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1;
+ break;
+ default:
+ goto decode_failed;
+ }
+
+ if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid))
+ goto decode_failed;
+
+ cf->can_id |= tmpid;
+
+ /* get len from sanitized ASCII value */
+ if (cf->len >= '0' && cf->len < '9')
+ cf->len -= '0';
+ else
+ goto decode_failed;
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++) {
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] = (tmp << 4);
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] |= tmp;
+ }
+ }
+
+ sl->dev->stats.rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf->len;
+
+ netif_rx(skb);
+ return;
+
+decode_failed:
+ sl->dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+}
+
+/* A state change frame must contain state info and receive and transmit
+ * error counters.
+ *
+ * Examples:
+ *
+ * sb256256 : state bus-off: rx counter 256, tx counter 256
+ * sa057033 : state active, rx counter 57, tx counter 33
+ */
+static void slcan_bump_state(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ u32 rxerr, txerr;
+ enum can_state state, rx_state, tx_state;
+
+ switch (cmd[1]) {
+ case 'a':
+ state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case 'w':
+ state = CAN_STATE_ERROR_WARNING;
+ break;
+ case 'p':
+ state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case 'b':
+ state = CAN_STATE_BUS_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
+ return;
+
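+ /* rbuff holds "s<state><rrr><ttt>": skip the command and state chars
+ * plus the 3-digit rx counter to reach the tx counter
+ */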
+ cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+ cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0;
+ if (kstrtou32(cmd, 10, &txerr))
+ return;
+
+ *cmd = 0;
+ cmd -= SLCAN_STATE_BE_RXCNT_LEN;
+ if (kstrtou32(cmd, 10, &rxerr))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF) {
+ can_bus_off(dev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (skb)
+ netif_rx(skb);
+}
+
+/* An error frame can contain more than one type of error.
+ *
+ * Examples:
+ *
+ * e1a : len 1, errors: ACK error
+ * e3bcO : len 3, errors: Bit0 error, CRC error, Tx overrun error
+ */
+static void slcan_bump_err(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ int i, len;
+
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
+ /* get len from sanitized ASCII value */
+ len = cmd[1];
+ if (len >= '0' && len < '9')
+ len -= '0';
+ else
+ return;
+
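+ /* each reported error is coded by a single char behind "e<len>";
+ * make sure they all fit into the receive buffer
+ */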
+ if ((len + SLCAN_CMD_LEN + 1) > sl->rcount)
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ cmd += SLCAN_CMD_LEN + 1;
+ for (i = 0; i < len; i++, cmd++) {
+ switch (*cmd) {
+ case 'a':
+ netdev_dbg(dev, "ACK error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+
+ break;
+ case 'b':
+ netdev_dbg(dev, "Bit0 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ break;
+ case 'B':
+ netdev_dbg(dev, "Bit1 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ break;
+ case 'c':
+ netdev_dbg(dev, "CRC error\n");
+ rx_errors = true;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ break;
+ case 'f':
+ netdev_dbg(dev, "Form Error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ break;
+ case 'o':
+ netdev_dbg(dev, "Rx overrun error\n");
+ rx_over_errors = true;
+ rx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ break;
+ case 'O':
+ netdev_dbg(dev, "Tx overrun error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW;
+ }
+
+ break;
+ case 's':
+ netdev_dbg(dev, "Stuff error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ break;
+ default:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return;
+ }
+ }
+
+ if (rx_errors)
+ dev->stats.rx_errors++;
+
+ if (rx_over_errors)
+ dev->stats.rx_over_errors++;
+
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void slcan_bump(struct slcan *sl)
+{
+ switch (sl->rbuff[0]) {
+ case 'r':
+ case 't':
+ case 'R':
+ case 'T':
+ return slcan_bump_frame(sl);
+ case 'e':
+ return slcan_bump_err(sl);
+ case 's':
+ return slcan_bump_state(sl);
+ default:
+ return;
+ }
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
+ slcan_bump(sl);
+
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLCAN_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+}
+
+/*************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ *************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slcan_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, i;
+ unsigned char *pos;
+ unsigned char *endpos;
+ canid_t id = cf->can_id;
+
+ pos = sl->xbuff;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
+ else
+ *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
+
+ /* determine number of chars for the CAN-identifier */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ endpos = pos + SLCAN_EFF_ID_LEN;
+ } else {
+ *pos |= 0x20; /* convert R/T to lower case for SFF */
+ id &= CAN_SFF_MASK;
+ endpos = pos + SLCAN_SFF_ID_LEN;
+ }
+
+ /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+ pos++;
+ while (endpos >= pos) {
+ *endpos-- = hex_asc_upper[id & 0xf];
+ id >>= 4;
+ }
+
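+ /* the loop above filled in the id digits backwards via endpos without
+ * moving pos, so step over them now
+ */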
+ pos += (cf->can_id & CAN_EFF_FLAG) ?
+ SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN;
+
+ *pos++ = cf->len + '0';
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++)
+ pos = hex_byte_pack_upper(pos, cf->data[i]);
+
+ sl->dev->stats.tx_bytes += cf->len;
+ }
+
+ *pos++ = '\r';
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a little amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we *never* got WRITE_WAKEUP event,
+ * if we did not request it before write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+ sl->xleft = (pos - sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
+{
+ struct slcan *sl = container_of(work, struct slcan, tx_work);
+ int actual;
+
+ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+ if (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags))) {
+ spin_unlock_bh(&sl->lock);
+ return;
+ }
+
+ if (sl->xleft <= 0) {
+ if (unlikely(test_bit(SLF_XCMD, &sl->flags))) {
+ clear_bit(SLF_XCMD, &sl->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ wake_up(&sl->xcmd_wait);
+ return;
+ }
+
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet
+ */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+ spin_unlock_bh(&sl->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ struct slcan *sl = tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ netdev_warn(dev, "xmit: iface is down\n");
+ goto out;
+ }
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
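+/* Send a control command (e.g. "O\r") to the adapter and wait for the
+ * tx_work worker to drain it. Completion is signalled via SLF_XCMD and
+ * the xcmd_wait queue, with a one second timeout.
+ */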
+static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
+{
+ int ret, actual, n;
+
+ spin_lock(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ return -ENODEV;
+ }
+
+ n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, n);
+ sl->xleft = n - actual;
+ sl->xhead = sl->xbuff + actual;
+ set_bit(SLF_XCMD, &sl->flags);
+ spin_unlock(&sl->lock);
+ ret = wait_event_interruptible_timeout(sl->xcmd_wait,
+ !test_bit(SLF_XCMD, &sl->flags),
+ HZ);
+ clear_bit(SLF_XCMD, &sl->flags);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Netdevice UP -> DOWN routine */
+static int slcan_netdev_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ int err;
+
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ err = slcan_transmit_cmd(sl, "C\r");
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
+ }
+
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ flush_work(&sl->tx_work);
+
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ close_candev(dev);
+ sl->can.state = CAN_STATE_STOPPED;
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNSET;
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slcan_netdev_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ unsigned char cmd[SLCAN_MTU];
+ int err, s;
+
+ /* When the bit rate has not been set via the command
+ * `ip link set <iface> type can bitrate <baud>',
+ * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
+ * open_candev() to fail. So let's set it to a fake value.
+ */
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN;
+
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
+ if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
+ break;
+ }
+
+ /* The CAN framework has already validated the bitrate value,
+ * so we can avoid checking whether `s' has been properly set.
+ */
+ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s);
+ err = slcan_transmit_cmd(sl, cmd);
+ if (err) {
+ netdev_err(dev,
+ "failed to send bitrate command 'C\\rS%d\\r'\n",
+ s);
+ goto cmd_transmit_failed;
+ }
+
+ if (test_bit(CF_ERR_RST, &sl->cmd_flags)) {
+ err = slcan_transmit_cmd(sl, "F\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send error command 'F\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ err = slcan_transmit_cmd(sl, "L\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send listen-only command 'L\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ } else {
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+ }
+
+ sl->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+ return 0;
+
+cmd_transmit_failed:
+ close_candev(dev);
+ return err;
+}
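+
+/* Typical userspace setup, as a sketch (assuming can-utils' slcand and
+ * iproute2 are available; the interface name depends on configuration):
+ *
+ *   slcand /dev/ttyUSB0 can0
+ *   ip link set can0 type can bitrate 500000
+ *   ip link set can0 up
+ */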
+
+static const struct net_device_ops slcan_netdev_ops = {
+ .ndo_open = slcan_netdev_open,
+ .ndo_stop = slcan_netdev_close,
+ .ndo_start_xmit = slcan_netdev_xmit,
+};
+
+/******************************************
+ * Routines looking at TTY side.
+ ******************************************/
+
+/* Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to the CAN layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel.
+ */
+static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
+{
+ struct slcan *sl = tty->disc_data;
+
+ if (!netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/* Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+static int slcan_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(*sl), 1);
+ if (!dev)
+ return -ENFILE;
+
+ sl = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
+
+ /* Configure CAN metadata */
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
+ sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ sl->dev = dev;
+ dev->netdev_ops = &slcan_netdev_ops;
+ dev->ethtool_ops = &slcan_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ sl->tty = tty;
+ tty->disc_data = sl;
+
+ err = register_candev(dev);
+ if (err) {
+ free_candev(dev);
+ pr_err("can't register candev\n");
+ return err;
+ }
+
+ netdev_info(dev, "slcan on %s.\n", tty->name);
+ /* TTY layer expects 0 on success */
+ return 0;
+}
+
+/* Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this method for a hangup event.
+ */
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = tty->disc_data;
+
+ unregister_candev(sl->dev);
+
+ /* The netdev needn't be UP (so .ndo_stop() is not called). Hence
+ * make sure the tx_work is not still running before freeing the
+ * netdev.
+ */
+ flush_work(&sl->tx_work);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ netdev_info(sl->dev, "slcan off %s.\n", tty->name);
+ free_candev(sl->dev);
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct slcan *sl = tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slcan_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_SLCAN,
+ .name = KBUILD_MODNAME,
+ .open = slcan_open,
+ .close = slcan_close,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ pr_info("serial line CAN interface driver\n");
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(&slcan_ldisc);
+ if (status)
+ pr_err("can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&slcan_ldisc);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
new file mode 100644
index 000000000000..f598c653fbfa
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "slcan.h"
+
+static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
+ "err-rst-on-open",
+};
+
+static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, slcan_priv_flags_strings,
+ sizeof(slcan_priv_flags_strings));
+ }
+}
+
+static u32 slcan_get_priv_flags(struct net_device *ndev)
+{
+ u32 flags = 0;
+
+ if (slcan_err_rst_on_open(ndev))
+ flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;
+
+ return flags;
+}
+
+static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);
+
+ return slcan_enable_err_rst_on_open(ndev, err_rst_op_open);
+}
+
+static int slcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(slcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops slcan_ethtool_ops = {
+ .get_strings = slcan_get_strings,
+ .get_priv_flags = slcan_get_priv_flags,
+ .set_priv_flags = slcan_set_priv_flags,
+ .get_sset_count = slcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
new file mode 100644
index 000000000000..85cedf856db3
--- /dev/null
+++ b/drivers/net/can/slcan/slcan.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * slcan.h - serial line CAN interface driver
+ *
+ * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk>
+ * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#ifndef _SLCAN_H
+#define _SLCAN_H
+
+bool slcan_err_rst_on_open(struct net_device *ndev);
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
+
+extern const struct ethtool_ops slcan_ethtool_ops;
+
+#endif /* _SLCAN_H */
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 2e93ee792373..e5c939b63fa6 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia)
return 0;
platform_failed:
- kfree(dev);
+ platform_device_put(pdev);
mem_failed:
pcmcia_bad:
pcmcia_failed:
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 7e1536877993..721df91cdbfb 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -436,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up)
return ret;
bus_bitmask_start = 0;
- if (dev && up)
+ if (up)
/* prepare to start this bus as well */
bus_bitmask_start |= (1 << priv->index);
/* bring netdevs down */
@@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up)
if (ret < 0)
goto failed;
}
- /* enable_error_frame */
- /*
+
+ /* enable_error_frame
+ *
* Error reporting is switched off at the moment since
* the receiving of them is not yet 100% verified
* This should be enabled sooner or later
- *
- if (error_reporting) {
+ */
+ if (0 && error_reporting) {
ret = softing_fct_cmd(card, 51, "enable_error_frame");
if (ret < 0)
goto failed;
}
- */
+
/* initialize interface */
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index cfc1325aad10..79bc64395ac4 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -5,6 +5,7 @@
* - Kurt Van Dijck, EIA Electronics
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/io.h>
@@ -59,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
uint8_t buf[DPRAM_TX_SIZE];
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
spin_lock(&card->spin);
@@ -282,7 +283,10 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
+ ++netdev->stats.tx_packets;
+ netdev->stats.tx_bytes +=
+ can_get_echo_skb(netdev, priv->tx.echo_get,
+ NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
@@ -290,9 +294,6 @@ static int softing_handle_1(struct softing *card)
--priv->tx.pending;
if (card->tx.pending)
--card->tx.pending;
- ++netdev->stats.tx_packets;
- if (!(msg.can_id & CAN_RTR_FLAG))
- netdev->stats.tx_bytes += msg.len;
} else {
int ret;
@@ -392,13 +393,10 @@ static int softing_netdev_open(struct net_device *ndev)
static int softing_netdev_stop(struct net_device *ndev)
{
- int ret;
-
netif_stop_queue(ndev);
/* softing cycle does close_candev() */
- ret = softing_startstop(ndev, 0);
- return ret;
+ return softing_startstop(ndev, 0);
}
static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
@@ -611,11 +609,14 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_open = softing_netdev_open,
.ndo_stop = softing_netdev_stop,
.ndo_start_xmit = softing_netdev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops softing_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static const struct can_bittiming_const softing_btr_const = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -652,6 +653,7 @@ static struct net_device *softing_netdev_create(struct softing *card,
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &softing_netdev_ops;
+ netdev->ethtool_ops = &softing_ethtool_ops;
priv->can.do_set_mode = softing_candev_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
@@ -726,7 +728,7 @@ static const struct attribute_group softing_pdev_group = {
/*
* platform driver
*/
-static int softing_pdev_remove(struct platform_device *pdev)
+static void softing_pdev_remove(struct platform_device *pdev)
{
struct softing *card = platform_get_drvdata(pdev);
int j;
@@ -744,7 +746,6 @@ static int softing_pdev_remove(struct platform_device *pdev)
iounmap(card->dpram);
kfree(card);
- return 0;
}
static int softing_pdev_probe(struct platform_device *pdev)
@@ -849,7 +850,7 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 89d9c986a229..e00d3dbc4cf4 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -16,20 +16,20 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -153,7 +153,6 @@ struct hi3110_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -166,6 +165,8 @@ struct hi3110_priv {
#define HI3110_AFTER_SUSPEND_POWER 4
#define HI3110_AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -175,13 +176,13 @@ static void hi3110_clean(struct net_device *net)
{
struct hi3110_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of hi3110_spi_trans: accessing
@@ -343,18 +344,17 @@ static void hi3110_hw_rx(struct spi_device *spi)
/* Data length */
frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
- if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+ if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) {
frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -368,12 +368,12 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_err(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
priv->force_quit = 1;
free_irq(spi->irq, priv);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
mutex_lock(&priv->hi3110_lock);
@@ -565,8 +563,6 @@ static int hi3110_stop(struct net_device *net)
mutex_unlock(&priv->hi3110_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -585,7 +581,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
} else {
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -665,25 +661,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
u8 rxerr, txerr;
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx_ni(skb);
if (new_state == CAN_STATE_BUS_OFF) {
+ if (skb)
+ netif_rx(skb);
can_bus_off(net);
if (priv->can.restart_ms == 0) {
priv->force_quit = 1;
hi3110_hw_sleep(spi);
break;
}
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ netif_rx(skb);
}
}
@@ -696,38 +694,45 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
/* Check for protocol errors */
if (eflag & HI3110_ERR_PROTOCOL_MASK) {
skb = alloc_can_err_skb(net, &cf);
- if (!skb)
- break;
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
priv->can.can_stats.bus_error++;
- priv->net->stats.rx_errors++;
- if (eflag & HI3110_ERR_BITERR)
- cf->data[2] |= CAN_ERR_PROT_BIT;
- else if (eflag & HI3110_ERR_FRMERR)
- cf->data[2] |= CAN_ERR_PROT_FORM;
- else if (eflag & HI3110_ERR_STUFERR)
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- else if (eflag & HI3110_ERR_CRCERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
- else if (eflag & HI3110_ERR_ACKERR)
- cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
-
- cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
- cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ if (eflag & HI3110_ERR_BITERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ } else if (eflag & HI3110_ERR_FRMERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ } else if (eflag & HI3110_ERR_STUFERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ } else if (eflag & HI3110_ERR_CRCERR) {
+ priv->net->stats.rx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ } else if (eflag & HI3110_ERR_ACKERR) {
+ priv->net->stats.tx_errors++;
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx_ni(skb);
+ if (skb) {
+ cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
+ cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
+ netif_rx(skb);
+ }
}
}
- if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
- can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
- }
+ net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
+ priv->tx_busy = false;
netif_wake_queue(net);
}
@@ -754,7 +759,7 @@ static int hi3110_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
flags, DEVICE_NAME, priv);
@@ -763,35 +768,23 @@ static int hi3110_open(struct net_device *net)
goto out_close;
}
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
- 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
- INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
- INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
-
ret = hi3110_hw_reset(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_setup(net);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_set_normal_mode(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
return 0;
- out_free_wq:
- destroy_workqueue(priv->wq);
out_free_irq:
free_irq(spi->irq, priv);
hi3110_hw_sleep(spi);
@@ -808,6 +801,10 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_start_xmit = hi3110_hard_start_xmit,
};
+static const struct ethtool_ops hi3110_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id hi3110_of_match[] = {
{
.compatible = "holt,hi3110",
@@ -828,19 +825,24 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table);
static int hi3110_can_probe(struct spi_device *spi)
{
- const struct of_device_id *of_id = of_match_device(hi3110_of_match,
- &spi->dev);
+ struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
- clk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&spi->dev, "no CAN clock source defined\n");
- return PTR_ERR(clk);
+ clk = devm_clk_get_optional(&spi->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n");
+
+ if (clk) {
+ freq = clk_get_rate(clk);
+ } else {
+ ret = device_property_read_u32(dev, "clock-frequency", &freq);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n");
}
- freq = clk_get_rate(clk);
/* Sanity check */
if (freq > 40000000)
@@ -851,13 +853,12 @@ static int hi3110_can_probe(struct spi_device *spi)
if (!net)
return -ENOMEM;
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_free;
- }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
+ net->ethtool_ops = &hi3110_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -870,10 +871,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- if (of_id)
- priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
@@ -897,6 +895,16 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
+ priv->wq = alloc_workqueue("hi3110_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
@@ -918,9 +926,7 @@ static int hi3110_can_probe(struct spi_device *spi)
ret = hi3110_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model);
goto error_probe;
}
hi3110_hw_sleep(spi);
@@ -929,26 +935,25 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
netdev_info(net, "%x successfully initialized.\n", priv->model);
return 0;
error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
hi3110_power_enable(priv->power, 0);
out_clk:
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
- return ret;
+ return dev_err_probe(dev, ret, "Probe failed\n");
}
-static int hi3110_can_remove(struct spi_device *spi)
+static void hi3110_can_remove(struct spi_device *spi)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -957,12 +962,12 @@ static int hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused hi3110_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 0579ab74f728..fa97adf25b73 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,13 +22,12 @@
#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -237,7 +236,6 @@ struct mcp251x_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -250,6 +248,8 @@ struct mcp251x_priv {
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -272,13 +272,13 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of mcp251x_spi_trans: accessing
@@ -388,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
mcp251x_spi_write(spi, 4);
}
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -398,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_write(spi, 4);
+ return mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -441,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
unsigned int offset)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ int ret;
u8 val;
/* nothing to be done for inputs */
@@ -450,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl |= val;
@@ -481,9 +484,9 @@ static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (mcp251x_gpio_is_input(offset))
- return GPIOF_DIR_IN;
+ return GPIO_LINE_DIRECTION_IN;
- return GPIOF_DIR_OUT;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -530,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
val = value ? mask : 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
-static void
+static int
mcp251x_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *maskp, unsigned long *bitsp)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -561,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
if (!mask)
- return;
+ return 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
static void mcp251x_gpio_restore(struct spi_device *spi)
@@ -600,9 +613,6 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
gpio->can_sleep = true;
-#ifdef CONFIG_OF_GPIO
- gpio->of_node = priv->spi->dev.of_node;
-#endif
return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
}
@@ -733,14 +743,14 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
/* Data length */
frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
- memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ if (!(frame->can_id & CAN_RTR_FLAG)) {
+ memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -755,7 +765,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
- disable_irq(spi->irq);
+ disable_irq_nosync(spi->irq);
mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
/* Wait for oscillator startup timer after wake up */
@@ -786,12 +796,12 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -973,8 +983,6 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -987,7 +995,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
- netif_rx_ni(skb);
+ netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
@@ -1011,7 +1019,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
if (frame->len > CAN_FRAME_MAX_DATA_LEN)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -1074,9 +1082,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1086,6 +1091,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
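The buffer-0 hunk above re-reads CANINTF because draining RX buffer 0 over SPI takes long enough for buffer 1 to fill in the meantime; without the second read, those frames would sit until the next interrupt. Schematically (a timeline sketch, not driver code):

/* 1st read:  intf  = RX0IF               (buffer 1 still empty)
 *            ... drain RX buffer 0 via SPI, frames keep arriving ...
 * 2nd read:  intf1 = RX1IF | ...         (buffer 1 filled meanwhile)
 * merge:     intf |= intf1; eflag |= eflag1;
 *            -> buffer 1 is handled in this same IST pass
 */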
@@ -1096,6 +1113,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
@@ -1177,12 +1197,11 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
break;
if (intf & CANINTF_TX) {
- net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
- can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
+ if (priv->tx_busy) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += can_get_echo_skb(net, 0,
+ NULL);
+ priv->tx_busy = false;
}
netif_wake_queue(net);
}
@@ -1209,7 +1228,7 @@ static int mcp251x_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
if (!dev_fwnode(&spi->dev))
flags = IRQF_TRIGGER_FALLING;
@@ -1232,8 +1251,6 @@ static int mcp251x_open(struct net_device *net)
if (ret)
goto out_free_irq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
-
netif_wake_queue(net);
mutex_unlock(&priv->mcp_lock);
@@ -1253,7 +1270,10 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_open = mcp251x_open,
.ndo_stop = mcp251x_stop,
.ndo_start_xmit = mcp251x_hard_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops mcp251x_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static const struct of_device_id mcp251x_of_match[] = {
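Both SPI drivers in this patch gain the same minimal ethtool_ops. ethtool_op_get_ts_info() is the stock helper that advertises software-only timestamping (TX/RX software timestamps, no PHC), which is what `ethtool -T` then reports. Reduced to its core, with a hypothetical device:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops demo_ethtool_ops = {
        /* advertises software timestamping only, phc_index = -1 */
        .get_ts_info = ethtool_op_get_ts_info,
};

static void demo_setup(struct net_device *ndev)
{
        ndev->ethtool_ops = &demo_ethtool_ops;
}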
@@ -1292,7 +1312,6 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
- const void *match = device_get_match_data(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
@@ -1301,7 +1320,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
clk = devm_clk_get_optional(&spi->dev, NULL);
if (IS_ERR(clk))
- return PTR_ERR(clk);
+ return dev_err_probe(&spi->dev, PTR_ERR(clk), "Cannot get clock\n");
freq = clk_get_rate(clk);
if (freq == 0)
@@ -1309,7 +1328,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Sanity check */
if (freq < 1000000 || freq > 25000000)
- return -ERANGE;
+ return dev_err_probe(&spi->dev, -ERANGE, "clock frequency out of range\n");
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
@@ -1317,10 +1336,13 @@ static int mcp251x_can_probe(struct spi_device *spi)
return -ENOMEM;
ret = clk_prepare_enable(clk);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable clock\n");
goto out_free;
+ }
net->netdev_ops = &mcp251x_netdev_ops;
+ net->ethtool_ops = &mcp251x_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -1329,10 +1351,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- if (match)
- priv->model = (enum mcp251x_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
@@ -1345,22 +1364,28 @@ static int mcp251x_can_probe(struct spi_device *spi)
else
spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
ret = spi_setup(spi);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up spi\n");
goto out_clk;
+ }
priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
(PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
ret = -EPROBE_DEFER;
+ dev_err_probe(&spi->dev, ret, "supply deferred\n");
goto out_clk;
}
ret = mcp251x_power_enable(priv->power, 1);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot enable power\n");
goto out_clk;
+ }
- priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("mcp251x_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
@@ -1391,27 +1416,31 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Here is OK to not lock the MCP, no one knows about it yet */
ret = mcp251x_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(&spi->dev, ret, "Cannot initialize MCP%x. Wrong wiring?\n",
+ priv->model);
goto error_probe;
}
mcp251x_hw_sleep(spi);
ret = register_candev(net);
- if (ret)
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot register CAN device\n");
goto error_probe;
-
- devm_can_led_init(net);
+ }
ret = mcp251x_gpio_setup(priv);
- if (ret)
- goto error_probe;
+ if (ret) {
+ dev_err_probe(&spi->dev, ret, "Cannot set up gpios\n");
+ goto out_unregister_candev;
+ }
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
+out_unregister_candev:
+ unregister_candev(net);
+
error_probe:
destroy_workqueue(priv->wq);
priv->wq = NULL;
@@ -1423,11 +1452,10 @@ out_clk:
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
return ret;
}
-static int mcp251x_can_remove(struct spi_device *spi)
+static void mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1442,8 +1470,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused mcp251x_can_suspend(struct device *dev)
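The probe-path changes above all funnel through dev_err_probe(), which logs the message and returns the error code in one step; for -EPROBE_DEFER it records the deferral reason instead of spamming the log. A condensed sketch of the pattern (hypothetical driver):

#include <linux/clk.h>
#include <linux/device.h>

static int demo_probe(struct device *dev)
{
        struct clk *clk;

        clk = devm_clk_get_optional(dev, NULL);
        if (IS_ERR(clk))
                /* logs (except for -EPROBE_DEFER) and returns PTR_ERR(clk) */
                return dev_err_probe(dev, PTR_ERR(clk), "Cannot get clock\n");

        return 0;
}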
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index dd0fc0a54be1..7c29846e6051 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -2,8 +2,10 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
+ select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
+ select GPIOLIB
help
Driver for the Microchip MCP251XFD SPI FD-CAN controller
family.
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index 3cba3b9447ea..94d7de954294 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -3,9 +3,16 @@
obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o
mcp251xfd-objs :=
+mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
mcp251xfd-objs += mcp251xfd-regmap.o
+mcp251xfd-objs += mcp251xfd-ring.o
+mcp251xfd-objs += mcp251xfd-rx.o
+mcp251xfd-objs += mcp251xfd-tef.o
mcp251xfd-objs += mcp251xfd-timestamp.o
+mcp251xfd-objs += mcp251xfd-tx.o
mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
new file mode 100644
index 000000000000..0d96097a2547
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static int
+mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+	 * FIFOs hit by an RX MAB overflow with RXOVIE enabled will
+	 * generate an RXOVIF; use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_RXTSEN |
+ MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FLTCON(ring->nr >> 2),
+ MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_TEFCON_TEFTSEN |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* TX FIFO */
+ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_TXEN |
+ MCP251XFD_REG_FIFOCON_TXATIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
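The register setup in this new file is built on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which shift a value into, or back out of, the bit positions described by a mask. A small illustration with a made-up 5-bit field (the mask is hypothetical, not an MCP251xFD register):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_FSIZE_MASK GENMASK(28, 24) /* hypothetical 5-bit field */

static u32 demo_pack_fsize(u32 reg, u8 obj_num)
{
        /* place (obj_num - 1) into bits 28..24 */
        reg &= ~DEMO_FSIZE_MASK;
        reg |= FIELD_PREP(DEMO_FSIZE_MASK, obj_num - 1);

        return reg;
}

static u8 demo_unpack_fsize(u32 reg)
{
        /* extract bits 28..24 back into a plain number */
        return FIELD_GET(DEMO_FSIZE_MASK, reg) + 1;
}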
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 673861ab665a..5134ebb85880 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -12,6 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
+#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -20,8 +21,6 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <asm/unaligned.h>
-
#include "mcp251xfd.h"
#define DEVICE_NAME "mcp251xfd"
@@ -39,6 +38,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -70,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
+/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
+ * [-64,63] for TDCO, indicating a relative TDCO.
+ *
+ * Manual tests have shown that using a relative TDCO configuration
+ * results in bus off, while an absolute configuration works.
+ *
+ * For TDCO, use the maximum value (63) from the data sheet, but 0 as
+ * the minimum.
+ */
+static const struct can_tdc_const mcp251xfd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 63,
+ .tdco_min = 0,
+ .tdco_max = 63,
+ .tdcf_min = 0,
+ .tdcf_max = 0,
+};
+
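For a sense of the numbers: the code this patch removes derived the default TDCO as brp * (prop_seg + phase_seg1) of the data bittiming; with e.g. brp = 1, prop_seg = 7 and phase_seg1 = 8 that gives an absolute TDCO of 15. A hedged one-liner of that arithmetic (the driver now takes tdcv/tdco as configured through the CAN netlink layer instead):

#include <linux/can/dev.h>
#include <linux/minmax.h>

static int demo_default_tdco(const struct can_bittiming *dbt)
{
        return clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
                       0, 63);
}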
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -77,6 +100,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -114,6 +139,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
return "<unknown>";
}
+static const char *
+mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference)
+{
+ switch (~osc & osc_reference &
+ (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) {
+ case MCP251XFD_REG_OSC_PLLRDY:
+ return "PLL";
+ case MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator";
+ case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator/PLL";
+ }
+
+ return "<unknown>";
+}
+
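The switch in mcp251xfd_get_osc_str() above keys on ~osc & osc_reference, i.e. exactly those requested ready bits that are still clear, so the log message names the component that failed to come up. A worked example (values illustrative):

u32 osc = MCP251XFD_REG_OSC_OSCRDY;     /* oscillator ready, PLL not */
u32 ref = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
u32 missing = ~osc & ref;               /* == ..._PLLRDY -> "PLL" */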
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
if (!priv->reg_vdd)
@@ -180,328 +221,9 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
-static inline u8
-mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
- union mcp251xfd_write_reg_buf *write_reg_buf,
- const u16 reg, const u32 mask, const u32 val)
-{
- u8 first_byte, last_byte, len;
- u8 *data;
- __le32 val_le32;
-
- first_byte = mcp251xfd_first_byte_set(mask);
- last_byte = mcp251xfd_last_byte_set(mask);
- len = last_byte - first_byte + 1;
-
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
- val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
- memcpy(data, &val_le32, len);
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(write_reg_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
- put_unaligned_be16(crc, (void *)write_reg_buf + len);
-
- /* Total length */
- len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
- }
-
- return len;
-}
-
-static inline int
-mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tef_tail)
-{
- u32 tef_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
- if (err)
- return err;
-
- *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tx_tail)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
- &fifo_sta);
- if (err)
- return err;
-
- *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_tail)
-{
- u32 fifo_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
- &fifo_ua);
- if (err)
- return err;
-
- fifo_ua -= ring->base - MCP251XFD_RAM_START;
- *rx_tail = fifo_ua / ring->obj_size;
-
- return 0;
-}
-
-static void
-mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_tx_ring *ring,
- struct mcp251xfd_tx_obj *tx_obj,
- const u8 rts_buf_len,
- const u8 n)
-{
- struct spi_transfer *xfer;
- u16 addr;
-
- /* FIFO load */
- addr = mcp251xfd_get_tx_obj_addr(ring, n);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
- addr);
- else
- mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
- addr);
-
- xfer = &tx_obj->xfer[0];
- xfer->tx_buf = &tx_obj->buf;
- xfer->len = 0; /* actual len is assigned on the fly */
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- /* FIFO request to send */
- xfer = &tx_obj->xfer[1];
- xfer->tx_buf = &ring->rts_buf;
- xfer->len = rts_buf_len;
-
- /* SPI message */
- spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
- ARRAY_SIZE(tx_obj->xfer));
-}
-
-static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_tef_ring *tef_ring;
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
- struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
- u32 val;
- u16 addr;
- u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
-
- /* TX */
- tx_ring = priv->tx;
- tx_ring->head = 0;
- tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
-
- /* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
- val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
- addr, val, val);
-
- mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
- mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
-
- /* RX */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- rx_ring->head = 0;
- rx_ring->tail = 0;
- rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
-
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
-
- prev_rx_ring = rx_ring;
-
- /* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
- val = MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
- xfer = &rx_ring->uinc_xfer[j];
- xfer->tx_buf = &rx_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an
- * active chip select after the complete SPI
- * message. This causes the controller to interpret
- * the next register access as data. Set "cs_change"
- * of the last transfer to "0" to properly deactivate
- * the chip select at the end of the message.
- */
- xfer->cs_change = 0;
- }
-}
-
-static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
-{
- int i;
-
- for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
- kfree(priv->rx[i]);
- priv->rx[i] = NULL;
- }
-}
-
-static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+static inline bool mcp251xfd_reg_invalid(u32 reg)
{
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
-
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- /* listen-only mode works like FD mode */
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
- } else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
- }
-
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
- tx_ring->obj_size = tx_obj_size;
-
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
-
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
-
- rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
- GFP_KERNEL);
- if (!rx_ring) {
- mcp251xfd_ring_free(priv);
- return -ENOMEM;
- }
- rx_ring->obj_num = rx_obj_num;
- rx_ring->obj_size = rx_obj_size;
- priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
- }
- priv->rx_ring_num = i;
-
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
-
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
-
- return 0;
+ return reg == 0x0 || reg == 0xffffffff;
}
static inline int
@@ -523,34 +245,61 @@ static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
const u8 mode_req, bool nowait)
{
- u32 con, con_reqop;
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US;
+ u32 con = 0, con_reqop, osc = 0;
+ u8 mode;
int err;
con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
- if (err)
+ if (err == -EBADMSG) {
+ netdev_err(priv->ndev,
+ "Failed to set Requested Operation Mode.\n");
+
+ return -ENODEV;
+ } else if (err) {
return err;
+ }
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
return 0;
+ if (bt->bitrate)
+ timeout_us = max_t(unsigned long, timeout_us,
+ MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC /
+ bt->bitrate);
+
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ !mcp251xfd_reg_invalid(con) &&
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
con) == mode_req,
- MCP251XFD_POLL_SLEEP_US,
- MCP251XFD_POLL_TIMEOUT_US);
- if (err) {
- u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ MCP251XFD_POLL_SLEEP_US, timeout_us);
+ if (err != -ETIMEDOUT && err != -EBADMSG)
+ return err;
+
+	/* Ignore the return value: print the error messages below
+	 * even if this read fails.
+ */
+ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (mcp251xfd_reg_invalid(con)) {
netdev_err(priv->ndev,
- "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode_req), mode_req,
- mcp251xfd_get_mode_str(mode), mode);
- return err;
+ "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n",
+ con, osc);
+
+ return -ENODEV;
}
- return 0;
+ mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode,
+ con, osc);
+
+ return -ETIMEDOUT;
}
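regmap_read_poll_timeout() used above polls a register until the condition holds; on timeout it returns -ETIMEDOUT with the final read still in the value variable, which is why the code can inspect con and osc afterwards. A minimal sketch of the pattern (register address and bit are hypothetical):

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_REG_STATUS 0x10
#define DEMO_READY      BIT(0)

static int demo_wait_ready(struct regmap *map)
{
        u32 val;
        int err;

        /* poll every 50 us, give up after 10 ms; val keeps the last read */
        err = regmap_read_poll_timeout(map, DEMO_REG_STATUS, val,
                                       val & DEMO_READY, 50, 10000);
        if (err == -ETIMEDOUT)
                pr_err("timeout waiting for ready (status=0x%08x)\n", val);

        return err;
}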
static inline int
@@ -567,27 +316,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
-static inline bool mcp251xfd_osc_invalid(u32 reg)
+static int
+mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv,
+ u32 osc_reference, u32 osc_mask)
{
- return reg == 0x0 || reg == 0xffffffff;
+ u32 osc;
+ int err;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ !mcp251xfd_reg_invalid(osc) &&
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (err != -ETIMEDOUT)
+ return err;
+
+ if (mcp251xfd_reg_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to read Oscillator Configuration Register (osc=0x%08x).\n",
+ osc);
+ return -ENODEV;
+ }
+
+ netdev_err(priv->ndev,
+ "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n",
+ mcp251xfd_get_osc_str(osc, osc_reference),
+ osc, osc_reference, osc_mask);
+
+ return -ETIMEDOUT;
}
-static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv)
{
u32 osc, osc_reference, osc_mask;
int err;
- /* Set Power On Defaults for "Clock Output Divisor" and remove
- * "Oscillator Disable" bit.
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing
+ * "Oscillator Disable" will wake the chip. For low power mode
+ * on MCP2518FD, asserting the chip select will wake the
+ * chip. Writing to the Oscillator register will wake it in
+ * both cases.
*/
osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* We cannot check for the PLL ready bit (either set or
+ * unset), as the PLL might be enabled. This can happen if the
+	 * system reboots while the mcp251xfd stays powered.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY;
- osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY;
- /* Note:
- *
- * If the controller is in Sleep Mode the following write only
+	/* If the controller is in Sleep Mode, the following write only
* removes the "Oscillator Disable" bit and powers it up. All
* other bits are unaffected.
*/
@@ -595,24 +375,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* Wait for "Oscillator Ready" bit */
- err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
- (osc & osc_mask) == osc_reference,
- MCP251XFD_OSC_STAB_SLEEP_US,
- MCP251XFD_OSC_STAB_TIMEOUT_US);
- if (mcp251xfd_osc_invalid(osc)) {
- netdev_err(priv->ndev,
- "Failed to detect %s (osc=0x%08x).\n",
- mcp251xfd_get_model_str(priv), osc);
- return -ENODEV;
- } else if (err == -ETIMEDOUT) {
- netdev_err(priv->ndev,
- "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
- osc, osc_reference);
- return -ETIMEDOUT;
+	/* Sometimes the PLL is stuck enabled; the controller then never
+	 * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our
+	 * caller takes care of retrying.
+ */
+ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+}
+
+static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv)
+{
+ if (priv->pll_enable) {
+ u32 osc;
+ int err;
+
+ /* Turn off PLL */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ netdev_err(priv->ndev,
+ "Failed to disable PLL.\n");
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow;
}
- return err;
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -620,10 +407,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
const __be16 cmd = mcp251xfd_cmd_reset();
int err;
- /* The Set Mode and SPI Reset command only seems to works if
- * the controller is not in Sleep Mode.
+ /* The Set Mode and SPI Reset command only works if the
+ * controller is not in Sleep Mode.
*/
- err = mcp251xfd_chip_clock_enable(priv);
+ err = mcp251xfd_chip_wake(priv);
if (err)
return err;
@@ -637,34 +424,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
- u32 osc, osc_reference;
+ u32 osc_reference, osc_mask;
u8 mode;
int err;
- err = mcp251xfd_chip_get_mode(priv, &mode);
- if (err)
- return err;
-
- if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
- netdev_info(priv->ndev,
- "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode), mode);
- return -ETIMEDOUT;
- }
-
+	/* Check for the reset defaults of the OSC reg.
+	 * This also takes care of the stabilization period.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
- /* check reset defaults of OSC reg */
- err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ err = mcp251xfd_chip_get_mode(priv, &mode);
if (err)
return err;
- if (osc != osc_reference) {
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
- osc, osc_reference);
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
return -ETIMEDOUT;
}
@@ -700,7 +482,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
- u32 osc;
+ u32 osc, osc_reference, osc_mask;
int err;
/* Activate Low Power Mode on Oscillator Disable. This only
@@ -710,10 +492,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
osc = MCP251XFD_REG_OSC_LPMEN |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ if (priv->pll_enable) {
+ osc |= MCP251XFD_REG_OSC_PLLEN;
+ osc_reference |= MCP251XFD_REG_OSC_PLLRDY;
+ }
+
err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
if (err)
return err;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast;
+
+ return 0;
+}
+
+static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
+{
/* Set Time Base Counter Prescaler to 1.
*
* This means an overflow of the 32 bit Time Base Counter
@@ -726,9 +527,8 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u32 val = 0;
- s8 tdco;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -792,34 +592,37 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
- -64, 63);
- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
+ else
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
+
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
- u32 val;
+ u32 val, mask;
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input/RX Interrupt
+ /* Configure PIN1 as RX Interrupt:
*
* PIN1 must be Input, otherwise there is a glitch on the
* rx-INT line. It happens between setting the PIN as output
* (in the first byte of the SPI transfer) and configuring the
* PIN as interrupt (in the last byte of the SPI transfer).
*/
- val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
- MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+ val = MCP251XFD_REG_IOCON_TRIS(1);
+ mask = MCP251XFD_REG_IOCON_TRIS(1) | MCP251XFD_REG_IOCON_PM(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, mask, val);
}
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
@@ -829,115 +632,9 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
if (!priv->rx_int)
return 0;
- /* Configure GPIOs:
- * - PIN0: GPIO Input
- * - PIN1: GPIO Input
- */
- val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
- MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
- return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
-}
-
-static int
-mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fifo_con;
-
- /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
- *
- * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
- * generate a RXOVIF, use this to properly detect RX MAB
- * overflows.
- */
- fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_RXTSEN |
- MCP251XFD_REG_FIFOCON_RXOVIE |
- MCP251XFD_REG_FIFOCON_TFNRFNIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- return regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
-}
-
-static int
-mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fltcon;
-
- fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
- MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
-
- return regmap_update_bits(priv->map_reg,
- MCP251XFD_REG_FLTCON(ring->nr >> 2),
- MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
- fltcon);
-}
-
-static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const struct mcp251xfd_rx_ring *rx_ring;
- u32 val;
- int err, n;
-
- /* TEF */
- val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_TEFCON_TEFTSEN |
- MCP251XFD_REG_TEFCON_TEFOVIE |
- MCP251XFD_REG_TEFCON_TEFNEIE;
-
- err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
- if (err)
- return err;
-
- /* FIFO 1 - TX */
- val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_TXEN |
- MCP251XFD_REG_FIFOCON_TXATIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
-
- err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
- val);
- if (err)
- return err;
-
- /* RX FIFOs */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
- err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
- if (err)
- return err;
-
- err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
- if (err)
- return err;
- }
-
- return 0;
+ /* Configure PIN1 as GPIO Input */
+ val = MCP251XFD_REG_IOCON_PM(1) | MCP251XFD_REG_IOCON_TRIS(1);
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val, val);
}
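Replacing regmap_write() with regmap_update_bits() in the two rx_int helpers matters because IOCON also carries the chip's GPIO configuration: the read-modify-write touches only the PM(1)/TRIS(1) bits and leaves the remaining pins alone. The general shape (hypothetical register and fields):

/* set only the bits covered by the mask; all other bits survive */
err = regmap_update_bits(priv->map_reg, DEMO_REG_IOCON,
                         DEMO_PM1 | DEMO_TRIS1, /* mask */
                         DEMO_TRIS1);           /* value within the mask */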
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
@@ -968,18 +665,10 @@ static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
return err;
}
-static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_ecc *ecc = &priv->ecc;
-
- ecc->ecc_stat = 0;
-}
-
static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
{
u8 mode;
-
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1064,27 +753,26 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
-static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
- const enum can_state state)
+static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
{
priv->can.state = state;
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ mcp251xfd_timestamp_stop(priv);
+ mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
{
int err;
- err = mcp251xfd_chip_softreset(priv);
+ err = mcp251xfd_chip_timestamp_init(priv);
if (err)
goto out_chip_stop;
- err = mcp251xfd_chip_clock_init(priv);
- if (err)
- goto out_chip_stop;
+ mcp251xfd_timestamp_start(priv);
err = mcp251xfd_set_bittiming(priv);
if (err)
@@ -1092,13 +780,15 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
err = mcp251xfd_chip_rx_int_enable(priv);
if (err)
- return err;
+ goto out_chip_stop;
err = mcp251xfd_chip_ecc_init(priv);
if (err)
goto out_chip_stop;
- mcp251xfd_ring_init(priv);
+ err = mcp251xfd_ring_init(priv);
+ if (err)
+ goto out_chip_stop;
err = mcp251xfd_chip_fifo_init(priv);
if (err)
@@ -1112,7 +802,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
return 0;
- out_chip_stop:
+out_chip_stop:
mcp251xfd_dump(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
@@ -1158,7 +848,7 @@ static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
return err;
if (trec & MCP251XFD_REG_TREC_TXBO)
- bec->txerr = 256;
+ bec->txerr = CAN_BUS_OFF_THRESHOLD;
else
bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
@@ -1186,447 +876,20 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
return __mcp251xfd_get_berr_counter(ndev, bec);
}
-static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
-{
- u8 tef_tail_chip, tef_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- if (tef_tail_chip != tef_tail) {
- netdev_err(priv->ndev,
- "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
- tef_tail_chip, tef_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u8 rx_tail_chip, rx_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
- if (err)
- return err;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
- if (rx_tail_chip != rx_tail) {
- netdev_err(priv->ndev,
- "RX tail of chip (%d) and ours (%d) inconsistent.\n",
- rx_tail_chip, rx_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
-mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- unsigned int *frame_len_ptr)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
-
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
- hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
-	 * compare 7 bits; this should be enough to detect
-	 * not-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- tef_tail_masked = priv->tef->tail &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- skb = priv->can.echo_skb[tef_tail];
- if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
- stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload,
- tef_tail, hw_tef_obj->ts,
- frame_len_ptr);
- stats->tx_packets++;
- priv->tef->tail++;
-
- return 0;
-}
-
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
- int err;
-
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
- if (err)
- return err;
-
-	/* chip_tx_tail is the next TX-Object sent by the HW.
- * The new TEF head must be >= the old head, ...
- */
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
-
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
-
- return mcp251xfd_check_tef_tail(priv);
-}
-
-static inline int
-mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- const u8 offset, const u8 len)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
-
- if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
- (offset > tx_ring->obj_num ||
- len > tx_ring->obj_num ||
- offset + len > tx_ring->obj_num)) {
- netdev_err(priv->ndev,
-			   "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
- tx_ring->obj_num, offset, len);
- return -ERANGE;
- }
-
- return regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_tef_obj_addr(offset),
- hw_tef_obj,
- sizeof(*hw_tef_obj) / val_bytes * len);
-}
-
-static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
- unsigned int total_frame_len = 0;
- u8 tef_tail, len, l;
- int err, i;
-
- err = mcp251xfd_tef_ring_update(priv);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
- err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
- if (err)
- return err;
-
- if (l < len) {
- err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
- if (err)
- return err;
- }
-
- for (i = 0; i < len; i++) {
- unsigned int frame_len = 0;
-
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
- */
- if (err == -EAGAIN)
- goto out_netif_wake_queue;
- if (err)
- return err;
-
- total_frame_len += frame_len;
- }
-
- out_netif_wake_queue:
-	len = i;	/* number of handled good TEFs */
- if (len) {
- struct mcp251xfd_tef_ring *ring = priv->tef;
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- int offset;
-
- /* Increment the TEF FIFO tail pointer 'len' times in
- * a single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- tx_ring->tail += len;
- netdev_completed_queue(priv->ndev, len, total_frame_len);
-
- err = mcp251xfd_check_tef_tail(priv);
- if (err)
- return err;
- }
-
- mcp251xfd_ecc_tefif_successful(priv);
-
- if (mcp251xfd_get_tx_free(priv->tx)) {
- /* Make sure that anybody stopping the queue after
- * this sees the new tx_ring->tail.
- */
- smp_mb();
- netif_wake_queue(priv->ndev);
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- u32 new_head;
- u8 chip_rx_head;
- int err;
-
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
- return err;
-
-	/* chip_rx_head is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
- */
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
-
- ring->head = new_head;
-
- return mcp251xfd_check_rx_tail(priv, ring);
-}
-
-static void
-mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- struct sk_buff *skb)
-{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- u8 dlc;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
- u32 sid, eid;
-
- eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
- sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
-
- cfd->can_id = CAN_EFF_FLAG |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
- } else {
- cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
- hw_rx_obj->id);
- }
-
- dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
-
- /* CANFD */
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
- cfd->flags |= CANFD_ESI;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
- cfd->flags |= CANFD_BRS;
-
- cfd->len = can_fd_dlc2len(dlc);
- } else {
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
- cfd->can_id |= CAN_RTR_FLAG;
-
- can_frame_set_cc_len((struct can_frame *)cfd, dlc,
- priv->can.ctrlmode);
- }
-
- if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
- memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
-}
-
-static int
-mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- struct canfd_frame *cfd;
- int err;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
- skb = alloc_canfd_skb(priv->ndev, &cfd);
- else
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
-
- if (!skb) {
- stats->rx_dropped++;
- return 0;
- }
-
- mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
- if (err)
- stats->rx_fifo_errors++;
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- const u8 offset, const u8 len)
-{
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
- int err;
-
- err = regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_rx_obj_addr(ring, offset),
- hw_rx_obj,
- len * ring->obj_size / val_bytes);
-
- return err;
-}
-
-static int
-mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
- int err, i;
-
- err = mcp251xfd_rx_ring_update(priv, ring);
- if (err)
- return err;
-
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
-
- err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
- if (err)
- return err;
-
- for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_rxif_one(priv, ring,
- (void *)hw_rx_obj +
- i * ring->obj_size);
- if (err)
- return err;
- }
-
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- ring->tail += len;
- }
-
- return 0;
-}
-
-static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_rx_ring *ring;
- int err, n;
-
- mcp251xfd_for_each_rx_ring(priv, ring, n) {
- err = mcp251xfd_handle_rxif_ring(priv, ring);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
- struct can_frame **cf, u32 *timestamp)
+ struct can_frame **cf, u32 *ts_raw)
{
struct sk_buff *skb;
int err;
- err = mcp251xfd_get_timestamp(priv, timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
if (err)
return NULL;
skb = alloc_can_err_skb(priv->ndev, cf);
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
return skb;
}
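The *_raw renames here and in the hunks below make explicit that these values are controller clock ticks, not nanoseconds; the conversion happens later through the driver's timecounter. Roughly (a sketch, assuming the usual timecounter setup):

#include <linux/timecounter.h>

static u64 demo_ts_raw_to_ns(struct timecounter *tc, u32 ts_raw)
{
        /* expand the 32 bit tick value into a 64 bit nanosecond time */
        return timecounter_cyc2time(tc, ts_raw);
}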
@@ -1637,7 +900,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
struct mcp251xfd_rx_ring *ring;
struct sk_buff *skb;
struct can_frame *cf;
- u32 timestamp, rxovif;
+ u32 ts_raw, rxovif;
int err, i;
stats->rx_over_errors++;
@@ -1653,12 +916,15 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
/* If SERRIF is active, there was a RX MAB overflow. */
if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
- netdev_info(priv->ndev,
- "RX-%d: MAB overflow detected.\n",
- ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
} else {
- netdev_info(priv->ndev,
- "RX-%d: FIFO overflow.\n", ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: FIFO overflow.\n",
+ ring->nr);
}
err = regmap_update_bits(priv->map_reg,
@@ -1669,14 +935,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
return err;
}
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
if (!skb)
return 0;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1693,12 +959,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 bdiag1, timestamp;
+ u32 bdiag1, ts_raw;
struct sk_buff *skb;
struct can_frame *cf = NULL;
int err;
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
return err;
@@ -1780,8 +1046,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1794,7 +1060,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
struct sk_buff *skb;
struct can_frame *cf = NULL;
enum can_state new_state, rx_state, tx_state;
- u32 trec, timestamp;
+ u32 trec, ts_raw;
int err;
err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
@@ -1824,7 +1090,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
/* The skb allocation might fail, but can_change_state()
* handles cf == NULL.
*/
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1850,11 +1116,12 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
if (err)
return err;
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1879,7 +1146,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
return 0;
}
- /* According to MCP2517FD errata DS80000792B 1., during a TX
+ /* According to MCP2517FD errata DS80000792C 1., during a TX
* MAB underflow, the controller will transition to Restricted
* Operation Mode or Listen Only Mode (depending on SERR2LOM).
*
@@ -1924,7 +1191,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* TX MAB underflow
*
- * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a TX MAB
* underflow is indicated by SERRIF and MODIF.
*
* In addition to the effects mentioned in the Errata, there
@@ -1968,7 +1235,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* RX MAB overflow
*
- * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a RX MAB
* overflow is indicated by SERRIF.
*
* In addition to the effects mentioned in the Errata, (most
@@ -2019,7 +1286,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -2074,7 +1342,8 @@ mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
return err;
/* Errata Reference:
- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2.,
+ * mcp251863: DS80000984A 2.
*
* ECC single error correction does not work in all cases:
*
@@ -2144,6 +1413,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
return 0;
}
+static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
+ size_t len;
+
+ if (priv->rx_ring_num == 1)
+ len = sizeof(priv->regs_status.intf);
+ else
+ len = sizeof(priv->regs_status);
+
+ return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status, len / val_bytes);
+}
+
#define mcp251xfd_handle(priv, irq, ...) \
({ \
struct mcp251xfd_priv *_priv = (priv); \
@@ -2160,7 +1443,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
- const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -2172,21 +1454,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
if (!rx_pending)
break;
+ /* Assume the 1st RX-FIFO is pending. If other
+ * FIFOs are pending, the main IRQ handler will
+ * take care of them.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
err = mcp251xfd_handle(priv, rxif);
if (err)
goto out_fail;
handled = IRQ_HANDLED;
- } while (1);
+
+ /* We don't know which RX-FIFO is pending, so only
+ * handle the 1st RX-FIFO here. Leave the loop if we
+ * have more than 1 RX-FIFO to avoid starving the
+ * others.
+ */
+ } while (priv->rx_ring_num == 1);
do {
u32 intf_pending, intf_pending_clearable;
bool set_normal_mode = false;
- err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
- &priv->regs_status,
- sizeof(priv->regs_status) /
- val_bytes);
+ err = mcp251xfd_read_regs_status(priv);
if (err)
goto out_fail;
@@ -2290,14 +1579,16 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
* check will fail, too. So leave IRQ handler
* directly.
*/
- if (priv->can.state == CAN_STATE_BUS_OFF)
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ can_rx_offload_threaded_irq_finish(&priv->offload);
return IRQ_HANDLED;
+ }
}
handled = IRQ_HANDLED;
} while (1);
- out_fail:
+out_fail:
can_rx_offload_threaded_irq_finish(&priv->offload);
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
@@ -2309,229 +1600,54 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
return handled;
}
-static inline struct
-mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
-{
- u8 tx_head;
-
- tx_head = mcp251xfd_get_tx_head(tx_ring);
-
- return &tx_ring->obj[tx_head];
-}
-
-static void
-mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj,
- const struct sk_buff *skb,
- unsigned int seq)
-{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
- union mcp251xfd_tx_obj_load_buf *load_buf;
- u8 dlc;
- u32 id, flags;
- int len_sanitized = 0, len;
-
- if (cfd->can_id & CAN_EFF_FLAG) {
- u32 sid, eid;
-
- sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
- eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
-
- id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
-
- flags = MCP251XFD_OBJ_FLAGS_IDE;
- } else {
- id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
- flags = 0;
- }
-
- /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
- * harm, only the lower 7 bits will be transferred into the
- * TEF object.
- */
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
-
- if (cfd->can_id & CAN_RTR_FLAG)
- flags |= MCP251XFD_OBJ_FLAGS_RTR;
- else
- len_sanitized = canfd_sanitize_len(cfd->len);
-
- /* CANFD */
- if (can_is_canfd_skb(skb)) {
- if (cfd->flags & CANFD_ESI)
- flags |= MCP251XFD_OBJ_FLAGS_ESI;
-
- flags |= MCP251XFD_OBJ_FLAGS_FDF;
-
- if (cfd->flags & CANFD_BRS)
- flags |= MCP251XFD_OBJ_FLAGS_BRS;
-
- dlc = can_fd_len2dlc(cfd->len);
- } else {
- dlc = can_get_cc_dlc((struct can_frame *)cfd,
- priv->can.ctrlmode);
- }
-
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
-
- load_buf = &tx_obj->buf;
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- hw_tx_obj = &load_buf->crc.hw_tx_obj;
- else
- hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
-
- put_unaligned_le32(id, &hw_tx_obj->id);
- put_unaligned_le32(flags, &hw_tx_obj->flags);
-
- /* Copy data */
- memcpy(hw_tx_obj->data, cfd->data, cfd->len);
-
- /* Clear unused data at end of CAN frame */
- if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
- int pad_len;
-
- pad_len = len_sanitized - cfd->len;
- if (pad_len)
- memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
- }
-
- /* Number of bytes to be written into the RAM of the controller */
- len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
- if (MCP251XFD_SANITIZE_CAN)
- len += round_up(len_sanitized, sizeof(u32));
- else
- len += round_up(cfd->len, sizeof(u32));
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(load_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
- put_unaligned_be16(crc, (void *)load_buf + len);
-
- /* Total length */
- len += sizeof(load_buf->crc.crc);
- } else {
- len += sizeof(load_buf->nocrc.cmd);
- }
-
- tx_obj->xfer[0].len = len;
-}
-
-static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj)
-{
- return spi_async(priv->spi, &tx_obj->msg);
-}
-
-static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_ring *tx_ring)
-{
- if (mcp251xfd_get_tx_free(tx_ring) > 0)
- return false;
-
- netif_stop_queue(priv->ndev);
-
- /* Memory barrier before checking tx_free (head and tail) */
- smp_mb();
-
- if (mcp251xfd_get_tx_free(tx_ring) == 0) {
- netdev_dbg(priv->ndev,
- "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
- tx_ring->head, tx_ring->tail,
- tx_ring->head - tx_ring->tail);
-
- return true;
- }
-
- netif_start_queue(priv->ndev);
-
- return false;
-}
-
-static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct mcp251xfd_priv *priv = netdev_priv(ndev);
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- struct mcp251xfd_tx_obj *tx_obj;
- unsigned int frame_len;
- u8 tx_head;
- int err;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- if (mcp251xfd_tx_busy(priv, tx_ring))
- return NETDEV_TX_BUSY;
-
- tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
- mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
-
- /* Stop queue if we occupy the complete TX FIFO */
- tx_head = mcp251xfd_get_tx_head(tx_ring);
- tx_ring->head++;
- if (mcp251xfd_get_tx_free(tx_ring) == 0)
- netif_stop_queue(ndev);
-
- frame_len = can_skb_get_frame_len(skb);
- err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
- if (!err)
- netdev_sent_queue(priv->ndev, frame_len);
-
- err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
-
- return NETDEV_TX_OK;
-}
-
static int mcp251xfd_open(struct net_device *ndev)
{
struct mcp251xfd_priv *priv = netdev_priv(ndev);
const struct spi_device *spi = priv->spi;
int err;
- err = pm_runtime_get_sync(ndev->dev.parent);
- if (err < 0) {
- pm_runtime_put_noidle(ndev->dev.parent);
- return err;
- }
-
err = open_candev(ndev);
if (err)
- goto out_pm_runtime_put;
+ return err;
+
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
+ if (err) {
+ if (err == -ETIMEDOUT || err == -ENODEV)
+ pm_runtime_set_suspended(ndev->dev.parent);
+ goto out_close_candev;
+ }
err = mcp251xfd_ring_alloc(priv);
if (err)
- goto out_close_candev;
+ goto out_pm_runtime_put;
err = mcp251xfd_transceiver_enable(priv);
if (err)
goto out_mcp251xfd_ring_free;
+ mcp251xfd_timestamp_init(priv);
+
err = mcp251xfd_chip_start(priv);
if (err)
goto out_transceiver_disable;
- mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -2541,20 +1657,22 @@ static int mcp251xfd_open(struct net_device *ndev)
return 0;
- out_free_irq:
+out_free_irq:
free_irq(spi->irq, priv);
- out_can_rx_offload_disable:
+out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
+out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
- mcp251xfd_timestamp_stop(priv);
- out_transceiver_disable:
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
- out_mcp251xfd_ring_free:
+out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_close_candev:
- close_candev(ndev);
- out_pm_runtime_put:
+out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
+out_close_candev:
+ close_candev(ndev);
return err;
}
@@ -2564,10 +1682,13 @@ static int mcp251xfd_stop(struct net_device *ndev)
struct mcp251xfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ hrtimer_cancel(&priv->rx_irq_timer);
+ hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
- mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
mcp251xfd_transceiver_disable(priv);
mcp251xfd_ring_free(priv);
@@ -2582,7 +1703,8 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
};
static void
@@ -2602,8 +1724,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
u32 osc;
int err;
- /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
- * autodetect the model.
+ /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+ * so use it to autodetect the model.
*/
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
MCP251XFD_REG_OSC_LPMEN,
@@ -2615,15 +1737,23 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (err)
return err;
- if (osc & MCP251XFD_REG_OSC_LPMEN)
- devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
- else
+ if (osc & MCP251XFD_REG_OSC_LPMEN) {
+ /* We cannot distinguish between MCP2518FD and
+ * MCP251863. If firmware specifies MCP251863, keep
+ * it, otherwise set to MCP2518FD.
+ */
+ if (mcp251xfd_is_251863(priv))
+ devtype_data = &mcp251xfd_devtype_data_mcp251863;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ } else {
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+ }
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
- "Detected %s, but firmware specifies a %s. Fixing up.",
+ "Detected %s, but firmware specifies a %s. Fixing up.\n",
__mcp251xfd_get_model_str(devtype_data->model),
mcp251xfd_get_model_str(priv));
}
@@ -2660,16 +1790,171 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
return 0;
netdev_info(priv->ndev,
- "RX_INT active after softreset, disabling RX_INT support.");
+ "RX_INT active after softreset, disabling RX_INT support.\n");
devm_gpiod_put(&priv->spi->dev, priv->rx_int);
priv->rx_int = NULL;
return 0;
}
+static const char * const mcp251xfd_gpio_names[] = { "GPIO0", "GPIO1" };
+
+static int mcp251xfd_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 pin_mask = MCP251XFD_REG_IOCON_PM(offset);
+ int ret;
+
+ if (priv->rx_int && offset == 1) {
+ netdev_err(priv->ndev, "Can't use GPIO 1 with RX-INT!\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(priv->ndev->dev.parent);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, pin_mask, pin_mask);
+}
+
+static void mcp251xfd_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+
+ pm_runtime_put(priv->ndev->dev.parent);
+}
+
+static int mcp251xfd_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ if (mask & val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mcp251xfd_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 mask = MCP251XFD_REG_IOCON_GPIO(offset);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ return !!(mask & val);
+}
+
+static int mcp251xfd_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bit)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->map_reg, MCP251XFD_REG_IOCON, &val);
+ if (ret)
+ return ret;
+
+ *bit = FIELD_GET(MCP251XFD_REG_IOCON_GPIO_MASK, val) & *mask;
+
+ return 0;
+}
+
+static int mcp251xfd_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ dir_mask | val_mask, val);
+}
+
+static int mcp251xfd_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 dir_mask = MCP251XFD_REG_IOCON_TRIS(offset);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, dir_mask, dir_mask);
+}
+
+static int mcp251xfd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val_mask = MCP251XFD_REG_IOCON_LAT(offset);
+ u32 val;
+
+ if (value)
+ val = val_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON, val_mask, val);
+}
+
+static int mcp251xfd_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct mcp251xfd_priv *priv = gpiochip_get_data(chip);
+ u32 val;
+
+ val = FIELD_PREP(MCP251XFD_REG_IOCON_LAT_MASK, *bits);
+
+ return regmap_update_bits(priv->map_reg, MCP251XFD_REG_IOCON,
+ MCP251XFD_REG_IOCON_LAT_MASK, val);
+}
+
+static int mcp251xfd_gpio_setup(struct mcp251xfd_priv *priv)
+{
+ struct gpio_chip *gc = &priv->gc;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ gc->label = dev_name(&priv->spi->dev);
+ gc->parent = &priv->spi->dev;
+ gc->owner = THIS_MODULE;
+ gc->request = mcp251xfd_gpio_request;
+ gc->free = mcp251xfd_gpio_free;
+ gc->get_direction = mcp251xfd_gpio_get_direction;
+ gc->direction_output = mcp251xfd_gpio_direction_output;
+ gc->direction_input = mcp251xfd_gpio_direction_input;
+ gc->get = mcp251xfd_gpio_get;
+ gc->get_multiple = mcp251xfd_gpio_get_multiple;
+ gc->set = mcp251xfd_gpio_set;
+ gc->set_multiple = mcp251xfd_gpio_set_multiple;
+ gc->base = -1;
+ gc->can_sleep = true;
+ gc->ngpio = ARRAY_SIZE(mcp251xfd_gpio_names);
+ gc->names = mcp251xfd_gpio_names;
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gc, priv);
+}
+
static int
-mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
- u32 *dev_id, u32 *effective_speed_hz)
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ u32 *effective_speed_hz_slow,
+ u32 *effective_speed_hz_fast)
{
struct mcp251xfd_map_buf_nocrc *buf_rx;
struct mcp251xfd_map_buf_nocrc *buf_tx;
@@ -2688,23 +1973,27 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
xfer[0].tx_buf = buf_tx;
xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
- xfer[1].len = sizeof(dev_id);
+ xfer[1].len = sizeof(*dev_id);
+ xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+
err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
if (err)
goto out_kfree_buf_tx;
- *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
- *effective_speed_hz = xfer->effective_speed_hz;
+ *dev_id = get_unaligned_le32(buf_rx->data);
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
- out_kfree_buf_tx:
+out_kfree_buf_tx:
kfree(buf_tx);
- out_kfree_buf_rx:
+out_kfree_buf_rx:
kfree(buf_rx);
- return 0;
+ return err;
}
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
@@ -2713,34 +2002,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
- u32 dev_id, effective_speed_hz;
+ u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast;
+ unsigned long clk_rate;
int err;
err = mcp251xfd_register_get_dev_id(priv, &dev_id,
- &effective_speed_hz);
+ &effective_speed_hz_slow,
+ &effective_speed_hz_fast);
if (err)
return err;
+ clk_rate = clk_get_rate(priv->clk);
+
netdev_info(priv->ndev,
- "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n",
mcp251xfd_get_model_str(priv),
FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
priv->rx_int ? '+' : '-',
+ priv->pll_enable ? '+' : '-',
MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
MCP251XFD_QUIRK_ACTIVE(CRC_REG),
MCP251XFD_QUIRK_ACTIVE(CRC_RX),
MCP251XFD_QUIRK_ACTIVE(CRC_TX),
MCP251XFD_QUIRK_ACTIVE(ECC),
MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ clk_rate / 1000000,
+ clk_rate % 1000000 / 1000 / 10,
priv->can.clock.freq / 1000000,
priv->can.clock.freq % 1000000 / 1000 / 10,
priv->spi_max_speed_hz_orig / 1000000,
priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
- priv->spi->max_speed_hz / 1000000,
- priv->spi->max_speed_hz % 1000000 / 1000 / 10,
- effective_speed_hz / 1000000,
- effective_speed_hz % 1000000 / 1000 / 10);
+ priv->spi_max_speed_hz_slow / 1000000,
+ priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10,
+ effective_speed_hz_slow / 1000000,
+ effective_speed_hz_slow % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_fast / 1000000,
+ priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10,
+ effective_speed_hz_fast / 1000000,
+ effective_speed_hz_fast % 1000000 / 1000 / 10);
return 0;
}
@@ -2750,45 +2050,59 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
struct net_device *ndev = priv->ndev;
int err;
+ mcp251xfd_register_quirks(priv);
+
err = mcp251xfd_clks_and_vdd_enable(priv);
if (err)
return err;
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
+
pm_runtime_get_noresume(ndev->dev.parent);
err = pm_runtime_set_active(ndev->dev.parent);
if (err)
goto out_runtime_put_noidle;
pm_runtime_enable(ndev->dev.parent);
- mcp251xfd_register_quirks(priv);
-
- err = mcp251xfd_chip_softreset(priv);
- if (err == -ENODEV)
- goto out_runtime_disable;
- if (err)
- goto out_chip_set_mode_sleep;
-
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_runtime_disable;
+
+ mcp251xfd_ethtool_init(priv);
+
+ err = mcp251xfd_gpio_setup(priv);
+ if (err) {
+ dev_err_probe(&priv->spi->dev, err, "Failed to register gpio-controller.\n");
+ goto out_runtime_disable;
+ }
err = register_candev(ndev);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_runtime_disable;
err = mcp251xfd_register_done(priv);
if (err)
goto out_unregister_candev;
- /* Put controller into sleep mode and let pm_runtime_put()
- * disable the clocks and vdd. If CONFIG_PM is not enabled,
- * the clocks and vdd will stay powered.
+ /* Put the controller into Config mode and let pm_runtime_put()
+ * put it into sleep mode and disable the clocks and vdd. If
+ * CONFIG_PM is not enabled, the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
if (err)
goto out_unregister_candev;
@@ -2796,14 +2110,15 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
return 0;
- out_unregister_candev:
+out_unregister_candev:
unregister_candev(ndev);
- out_chip_set_mode_sleep:
- mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
- out_runtime_disable:
+out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
- out_runtime_put_noidle:
+out_runtime_put_noidle:
pm_runtime_put_noidle(ndev->dev.parent);
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
mcp251xfd_clks_and_vdd_disable(priv);
return err;
@@ -2815,10 +2130,12 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- pm_runtime_get_sync(ndev->dev.parent);
- pm_runtime_put_noidle(ndev->dev.parent);
- mcp251xfd_clks_and_vdd_disable(priv);
- pm_runtime_disable(ndev->dev.parent);
+ if (pm_runtime_enabled(ndev->dev.parent)) {
+ pm_runtime_disable(ndev->dev.parent);
+ } else {
+ mcp251xfd_chip_sleep(priv);
+ mcp251xfd_clks_and_vdd_disable(priv);
+ }
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -2829,6 +2146,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -2845,6 +2165,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -2855,12 +2178,12 @@ MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
static int mcp251xfd_probe(struct spi_device *spi)
{
- const void *match;
struct net_device *ndev;
struct mcp251xfd_priv *priv;
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
+ bool pll_enable = false;
u32 freq = 0;
int err;
@@ -2911,12 +2234,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
return -ERANGE;
}
- if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
- dev_err(&spi->dev,
- "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
- freq);
- return -ERANGE;
- }
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER)
+ pll_enable = true;
ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
MCP251XFD_TX_OBJ_NUM_MAX);
@@ -2932,30 +2251,31 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv = netdev_priv(ndev);
spi_set_drvdata(spi, priv);
priv->can.clock.freq = freq;
+ if (pll_enable)
+ priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER;
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
- priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
priv->clk = clk;
+ priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
-
- match = device_get_match_data(&spi->dev);
- if (match)
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
- else
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)
- spi_get_device_id(spi)->driver_data;
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi);
/* Errata Reference:
- * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4.,
+ * mcp251863: DS80000984A 4.
*
* The SPI can write corrupted data to the RAM at fast SPI
* speeds:
@@ -2981,7 +2301,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
- spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ priv->spi_max_speed_hz_slow = min(spi->max_speed_hz,
+ freq / 2 / 1000 * 850);
+ if (priv->pll_enable)
+ priv->spi_max_speed_hz_fast = min(spi->max_speed_hz,
+ freq *
+ MCP251XFD_OSC_PLL_MULTIPLIER /
+ 2 / 1000 * 850);
+ else
+ priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow;
+ spi->max_speed_hz = priv->spi_max_speed_hz_slow;
spi->bits_per_word = 8;
spi->rt = true;
err = spi_setup(spi);
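The expression "freq / 2 / 1000 * 850" caps the SPI clock at 85% of
SYSCLOCK/2, as required by the errata cited above. A standalone sketch
of the slow/fast computation with made-up numbers (4 MHz oscillator,
20 MHz SPI controller limit) and a PLL multiplier of 10, assumed to
match MCP251XFD_OSC_PLL_MULTIPLIER:

#include <stdio.h>

int main(void)
{
	const unsigned int freq = 4000000;	/* oscillator */
	const unsigned int spi_max = 20000000;	/* SPI controller limit */
	const unsigned int mult = 10;		/* assumed PLL multiplier */
	unsigned int slow, fast;

	slow = freq / 2 / 1000 * 850;		/* 85% of osc/2 */
	if (slow > spi_max)
		slow = spi_max;

	fast = freq * mult / 2 / 1000 * 850;	/* 85% of PLL clock/2 */
	if (fast > spi_max)
		fast = spi_max;

	/* prints: slow=1700000 fast=17000000 */
	printf("slow=%u fast=%u\n", slow, fast);
	return 0;
}

Without the PLL the fast limit simply falls back to the slow one, as
in the probe code above.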
@@ -2998,14 +2327,17 @@ static int mcp251xfd_probe(struct spi_device *spi)
goto out_free_candev;
err = mcp251xfd_register(priv);
- if (err)
+ if (err) {
+ dev_err_probe(&spi->dev, err, "Failed to detect %s.\n",
+ mcp251xfd_get_model_str(priv));
goto out_can_rx_offload_del;
+ }
return 0;
- out_can_rx_offload_del:
+out_can_rx_offload_del:
can_rx_offload_del(&priv->offload);
- out_free_candev:
+out_free_candev:
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
@@ -3013,31 +2345,54 @@ static int mcp251xfd_probe(struct spi_device *spi)
return err;
}
-static int mcp251xfd_remove(struct spi_device *spi)
+static void mcp251xfd_remove(struct spi_device *spi)
{
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
-
- return 0;
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ mcp251xfd_chip_sleep(priv);
return mcp251xfd_clks_and_vdd_disable(priv);
}
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
- const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+ int err;
+
+ err = mcp251xfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_clks_and_vdd_disable;
+ if (err)
+ goto out_chip_sleep;
- return mcp251xfd_clks_and_vdd_enable(priv);
+ return 0;
+
+out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
+out_clks_and_vdd_disable:
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+ return err;
}
static const struct dev_pm_ops mcp251xfd_pm_ops = {
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index ffae8fdd3af0..050321345304 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
kfree(buf);
}
- out:
+out:
mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
}
@@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
.val = tx->base,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
- .val = 0,
+ .val = tx->nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
- .val = MCP251XFD_TX_FIFO,
+ .val = tx->fifo_nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
.val = tx->obj_num,
@@ -253,7 +253,7 @@ void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
sizeof(struct mcp251xfd_dump_object_reg);
- /* TEF ring, RX ring, TX rings */
+ /* TEF ring, RX rings, TX ring */
rings_num = 1 + priv->rx_ring_num + 1;
obj_num += rings_num;
file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 000000000000..57eeb066a945
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ ring->rx_max_pending = layout.max_rx;
+ ring->tx_max_pending = layout.max_tx;
+
+ ring->rx_pending = priv->rx_obj_num;
+ ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+ if ((layout.cur_rx != priv->rx_obj_num ||
+ layout.cur_tx != priv->tx->obj_num) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 rx_max_frames, tx_max_frames;
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (priv->rx_obj_num_coalesce_irq == 0)
+ rx_max_frames = 1;
+ else
+ rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+ ec->rx_max_coalesced_frames_irq = rx_max_frames;
+ ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+ if (priv->tx_obj_num_coalesce_irq == 0)
+ tx_max_frames = 1;
+ else
+ tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+ ec->tx_max_coalesced_frames_irq = tx_max_frames;
+ ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+ return 0;
+}
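The driver stores "coalescing off" as 0 internally, while the ethtool
convention quoted above encodes it as max_frames = 1. The mapping that
the two if/else blocks implement boils down to this hypothetical
helper (not part of the driver):

static unsigned int coalesce_frames_to_ethtool(unsigned int obj_num)
{
	/* internal 0 means "off"; ethtool expects max_frames = 1 */
	return obj_num ? obj_num : 1;
}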
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+ if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+ ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+ layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+ ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ringparam = mcp251xfd_ring_get_ringparam,
+ .set_ringparam = mcp251xfd_ring_set_ringparam,
+ .get_coalesce = mcp251xfd_ring_get_coalesce,
+ .set_coalesce = mcp251xfd_ring_set_coalesce,
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+ struct can_ram_layout layout;
+
+ priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+ priv->rx_obj_num = layout.default_rx;
+ priv->tx->obj_num = layout.default_tx;
+
+ priv->rx_obj_num_coalesce_irq = 0;
+ priv->tx_obj_num_coalesce_irq = 0;
+ priv->rx_coalesce_usecs_irq = 0;
+ priv->tx_coalesce_usecs_irq = 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 000000000000..61b0d6fa52dd
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ u8 val)
+{
+ u8 max;
+
+ max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+ return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ const u8 coalesce, u8 val)
+{
+ u8 fifo_num = obj->fifo_num;
+ u8 ret = 0, i;
+
+ val = can_ram_clamp(config, obj, val);
+
+ if (coalesce) {
+ /* Use the 1st FIFO for coalescing, if requested.
+ *
+ * Either use the complete FIFO (and the FIFO Full IRQ)
+ * for coalescing, or only half of the FIFO (FIFO Half
+ * Full IRQ) and use the remaining half for normal
+ * objects.
+ */
+ ret = min_t(u8, coalesce * 2, config->fifo_depth);
+ val -= ret;
+ fifo_num--;
+ }
+
+ for (i = 0; i < fifo_num && val; i++) {
+ u8 n;
+
+ n = min_t(u8, rounddown_pow_of_two(val),
+ config->fifo_depth);
+
+ /* skip small FIFOs */
+ if (n < obj->fifo_depth_min)
+ return ret;
+
+ ret += n;
+ val -= n;
+ }
+
+ return ret;
+}
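As a worked example, assuming a hypothetical fifo_depth of 32,
fifo_depth_min of 4 and no coalescing: a request for 50 objects is
split into power-of-two FIFOs of 32 and 16, and the remaining 2
objects fall below fifo_depth_min and are dropped. A simplified,
runnable sketch of the same loop (it ignores the fifo_num limit and
the coalescing reservation):

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int val)
{
	unsigned int ret = 1;

	while (ret * 2 <= val)
		ret *= 2;
	return ret;
}

int main(void)
{
	const unsigned int fifo_depth = 32, fifo_depth_min = 4;
	unsigned int val = 50, ret = 0;

	while (val) {
		unsigned int n = rounddown_pow_of_two(val);

		if (n > fifo_depth)
			n = fifo_depth;
		if (n < fifo_depth_min)	/* skip small FIFOs */
			break;
		ret += n;
		val -= n;
	}
	printf("50 -> %u (32 + 16, remainder 2 dropped)\n", ret);
	return 0;
}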
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode)
+{
+ u8 num_rx, num_tx;
+ u16 ram_free;
+
+ /* default CAN */
+
+ num_tx = config->tx.def[fd_mode];
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * num_tx;
+
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->default_tx = num_tx;
+
+ /* MAX CAN */
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * config->tx.min;
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ ram_free = config->size;
+ ram_free -= config->rx.size[fd_mode] * config->rx.min;
+ num_tx = ram_free / config->tx.size[fd_mode];
+
+ layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* cur CAN */
+
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+ /* If the ring parameters have been configured in
+ * CAN-CC mode, but we are in CAN-FD mode now, they
+ * might be too big. Use the default CAN-FD values in
+ * this case.
+ */
+ num_rx = ring->rx_pending;
+ if (num_rx > layout->max_rx)
+ num_rx = layout->default_rx;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+ ec->rx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_rx / 2, config->fifo_depth);
+ num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+ (u32)config->rx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+ num_rx_coalesce, num_rx);
+ }
+
+ ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+ num_tx = ram_free / config->tx.size[fd_mode];
+ num_tx = min_t(u8, ring->tx_pending, num_tx);
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+ ec->tx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_tx / 2, config->fifo_depth);
+ num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+ (u32)config->tx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+ num_tx_coalesce, num_tx);
+ }
+
+ layout->cur_rx = num_rx;
+ layout->cur_tx = num_tx;
+ layout->rx_coalesce = num_rx_coalesce;
+ layout->tx_coalesce = num_tx_coalesce;
+ } else {
+ layout->cur_rx = layout->default_rx;
+ layout->cur_tx = layout->default_tx;
+ layout->rx_coalesce = 0;
+ layout->tx_coalesce = 0;
+ }
+}
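A minimal usage sketch of the helper in kernel context; the ring
request values are hypothetical and "my_config" stands in for the
driver's mcp251xfd_ram_config:

static void example_ram_layout(const struct can_ram_config *my_config)
{
	const struct ethtool_ringparam ring = {
		.rx_pending = 64,	/* hypothetical ethtool request */
		.tx_pending = 16,
	};
	struct can_ram_layout layout;

	/* CAN-FD mode, no coalescing requested (ec == NULL) */
	can_ram_get_layout(&layout, my_config, &ring, NULL, true);

	/* layout.cur_rx/cur_tx now hold the power-of-two rounded,
	 * RAM-limited values; rx_coalesce/tx_coalesce are 0.
	 */
}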
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 000000000000..7558c1510cbf
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+ CAN_RAM_MODE_CAN,
+ CAN_RAM_MODE_CANFD,
+ __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+ u8 size[__CAN_RAM_MODE_MAX];
+
+ u8 def[__CAN_RAM_MODE_MAX];
+ u8 min;
+ u8 max;
+
+ u8 fifo_num;
+ u8 fifo_depth_min;
+ u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+ const struct can_ram_obj_config rx;
+ const struct can_ram_obj_config tx;
+
+ u16 size;
+ u8 fifo_depth;
+};
+
+struct can_ram_layout {
+ u8 default_rx;
+ u8 default_tx;
+
+ u8 max_rx;
+ u8 max_tx;
+
+ u8 cur_rx;
+ u8 cur_tx;
+
+ u8 rx_coalesce;
+ u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode);
+
+#endif
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 297491516a26..70d5ff0ae7ac 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -2,28 +2,20 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
#include "mcp251xfd.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static const struct regmap_config mcp251xfd_regmap_crc;
static int
-mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
-{
- struct spi_device *spi = context;
-
- return spi_write(spi, data, count);
-}
-
-static int
-mcp251xfd_regmap_nocrc_gather_write(void *context,
- const void *reg, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -47,22 +39,81 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
-static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+static int
+mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = be16_to_cpu(*(__be16 *)reg_p) & MCP251XFD_SPI_ADDRESS_MASK;
+ int ret;
+
+ /* Never write to bits 16..23 of the IOCON register to avoid clearing LAT0/LAT1.
+ *
+ * According to MCP2518FD Errata DS80000789E 5., writing the IOCON register using
+ * one SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is read-only and writing to it causes the clearing of LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ reg = (__force unsigned short)cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | reg);
+ return _mcp251xfd_regmap_nocrc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_nocrc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
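The split arithmetic can be sanity-checked in isolation. The addresses
below are the ones named in the comment above (IOCON at 0xe04, the
excluded read-only byte at 0xe06):

#include <stdio.h>

int main(void)
{
	const unsigned int reg = 0xe04;		/* MCP251XFD_REG_IOCON */
	const unsigned int byte_exclude = 0xe06; /* IOCON[23:16] */
	const unsigned int val_len = 4;		/* full 32 bit write */
	const unsigned int len = byte_exclude - reg;

	/* 1st transfer covers 0xe04..0xe05, 2nd starts at 0xe07 */
	printf("write 1: reg=0x%03x len=%u\n", reg, len);
	printf("write 2: reg=0x%03x len=%u\n",
	       reg + len + 1, val_len - len - 1);
	return 0;
}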
+
+static int
+mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16);
+
+ return mcp251xfd_regmap_nocrc_gather_write(context, data, data_offset,
+ data + data_offset, count - data_offset);
+}
+
+static inline bool
+mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
+ unsigned int reg)
{
+ struct mcp251xfd_rx_ring *ring;
+ int n;
+
switch (reg) {
case MCP251XFD_REG_INT:
case MCP251XFD_REG_TEFCON:
- case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_FLTCON(0):
case MCP251XFD_REG_ECCSTAT:
case MCP251XFD_REG_CRC:
return false;
case MCP251XFD_REG_CON:
- case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
+ case MCP251XFD_REG_IOCON:
return true;
default:
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr))
+ return false;
+ if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr))
+ return true;
+ }
+
WARN(1, "Status of reg 0x%04x unknown.\n", reg);
}
@@ -92,7 +143,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- if (mcp251xfd_update_bits_read_reg(reg)) {
+ if (mcp251xfd_update_bits_read_reg(priv, reg)) {
struct spi_transfer xfer[2] = { };
struct spi_message msg;
@@ -129,10 +180,9 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
tmp_le32 = orig_le32 & ~mask_le32;
tmp_le32 |= val_le32 & mask_le32;
- mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
- memcpy(buf_tx->data, &tmp_le32, len);
-
- return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+ reg += first_byte;
+ mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg);
+ return mcp251xfd_regmap_nocrc_gather_write(context, &buf_tx->cmd, 2, &tmp_le32, len);
}
static int
@@ -186,9 +236,9 @@ mcp251xfd_regmap_nocrc_read(void *context,
}
static int
-mcp251xfd_regmap_crc_gather_write(void *context,
- const void *reg_p, size_t reg_len,
- const void *val, size_t val_len)
+_mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
{
struct spi_device *spi = context;
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
@@ -220,6 +270,44 @@ mcp251xfd_regmap_crc_gather_write(void *context,
}
static int
+mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ const u16 byte_exclude = MCP251XFD_REG_IOCON +
+ mcp251xfd_first_byte_set(MCP251XFD_REG_IOCON_GPIO_MASK);
+ u16 reg = *(u16 *)reg_p;
+ int ret;
+
+ /* Never write to bits 16..23 of the IOCON register to avoid clearing LAT0/LAT1.
+ *
+ * According to MCP2518FD Errata DS80000789E 5., writing the IOCON register using
+ * one SPI write command clears LAT0/LAT1.
+ *
+ * The Errata Fix/Work Around suggests writing registers with single byte
+ * write instructions. However, it seems that the byte at 0xe06 (IOCON[23:16])
+ * is read-only and writing to it causes the clearing of LAT0/LAT1.
+ */
+ if (reg <= byte_exclude && reg + val_len > byte_exclude) {
+ size_t len = byte_exclude - reg;
+
+ /* Write up to 0xe05 */
+ ret = _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len, val, len);
+ if (ret)
+ return ret;
+
+ /* Write from 0xe07 on */
+ reg += len + 1;
+ return _mcp251xfd_regmap_crc_gather_write(context, &reg, reg_len,
+ val + len + 1,
+ val_len - len - 1);
+ }
+
+ return _mcp251xfd_regmap_crc_gather_write(context, reg_p, reg_len,
+ val, val_len);
+}
+
+static int
mcp251xfd_regmap_crc_write(void *context,
const void *data, size_t count)
{
@@ -250,7 +338,6 @@ mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const b
return 0;
}
-
static int
mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
struct spi_message *msg, unsigned int data_len)
@@ -325,19 +412,21 @@ mcp251xfd_regmap_crc_read(void *context,
* register. It increments once per SYS clock tick,
* which is 20 or 40 MHz.
*
- * Observation shows that if the lowest byte (which is
- * transferred first on the SPI bus) of that register
- * is 0x00 or 0x80 the calculated CRC doesn't always
- * match the transferred one.
+ * Observation on the mcp2518fd shows that if the
+ * lowest byte (which is transferred first on the SPI
+ * bus) of that register is 0x00 or 0x80 the
+ * calculated CRC doesn't always match the transferred
+ * one. On the mcp2517fd this problem is not limited
+ * to the first byte being 0x00 or 0x80.
*
* If the highest bit in the lowest byte is flipped
* the transferred CRC matches the calculated one. We
- * assume for now the CRC calculation in the chip
- * works on wrong data and the transferred data is
- * correct.
+ * assume for now the CRC operates on the correct
+ * data.
*/
if (reg == MCP251XFD_REG_TBC &&
- (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ ((buf_rx->data[0] & 0xf8) == 0x0 ||
+ (buf_rx->data[0] & 0xf8) == 0x80)) {
/* Flip highest bit in lowest byte of le32 */
buf_rx->data[0] ^= 0x80;
@@ -347,10 +436,8 @@ mcp251xfd_regmap_crc_read(void *context,
val_len);
if (!err) {
/* If CRC is now correct, assume
- * transferred data was OK, flip bit
- * back to original value.
+ * flipped data is OK.
*/
- buf_rx->data[0] ^= 0x80;
goto out;
}
}
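Pulled out of the regmap plumbing, the retry reads as follows; a
minimal sketch, assuming a caller-supplied CRC16 routine and the
known-bad TBC byte patterns described above:

/* Returns 0 if the bit-flipped data matches the received CRC. */
static int tbc_crc_retry(unsigned char *data, unsigned int len,
			 unsigned short crc_received,
			 unsigned short (*crc16)(const unsigned char *,
						 unsigned int))
{
	/* only the known-bad patterns qualify for the retry */
	if ((data[0] & 0xf8) != 0x00 && (data[0] & 0xf8) != 0x80)
		return -1;

	/* flip the highest bit in the lowest byte, re-check the CRC */
	data[0] ^= 0x80;
	if (crc16(data, len) == crc_received)
		return 0;	/* assume the flipped data is OK */

	data[0] ^= 0x80;	/* restore; caller reports the error */
	return -1;
}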
@@ -369,7 +456,7 @@ mcp251xfd_regmap_crc_read(void *context,
* to the caller. It will take care of both cases.
*
*/
- if (reg == MCP251XFD_REG_OSC) {
+ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) {
err = 0;
goto out;
}
@@ -388,7 +475,7 @@ mcp251xfd_regmap_crc_read(void *context,
return err;
}
- out:
+out:
memcpy(val_buf, buf_rx->data, val_len);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
new file mode 100644
index 000000000000..c34f2067a989
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021, 2024 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/unaligned.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static inline u8
+mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ } else if (len == 1) {
+ u16 crc;
+
+ /* CRC */
+ len += sizeof(write_reg_buf->safe.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->safe.crc);
+ } else {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ }
+
+ return len;
+}
+
+static void
+mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* TEF- and TX-FIFO have same number of objects */
+ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+ addr, val, val);
+ tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+ tef_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+ &tef_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
+ xfer = &tef_ring->uinc_xfer[i];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+ val = MCP251XFD_REG_TEFCON_UINC |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &tef_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+}
+
+static void
+mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_tx_ring *ring,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp251xfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
+
+static void
+mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = *base;
+ tx_ring->nr = 0;
+ tx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
+ *fifo_nr += 1;
+
+ /* FIFO request to send */
+ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+}
+
+static void
+mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->last_valid = timecounter_read(&priv->tc);
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->base = *base;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
+ *fifo_nr += 1;
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+ addr, val, val);
+ rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+ rx_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+ &rx_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment RX tail pointer */
+ val = MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
+ xfer = &rx_ring->uinc_xfer[j];
+ xfer->tx_buf = &rx_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Set "cs_change"
+ * of the last transfer to "0" to properly deactivate
+ * the chip select at the end of the message.
+ */
+ xfer->cs_change = 0;
+
+ /* Use 1st RX-FIFO for IRQ coalescing. If enabled
+ * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq
+ * is activated), use the last transfer to disable:
+ *
+ * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+ *
+ * and enable:
+ *
+ * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+ * - or -
+ * - TFERFFIE (Receive FIFO Full Interrupt)
+ *
+ * depending on rx_max_coalesce_frames_irq.
+ *
+ * The RXOVIE (Overflow Interrupt) is always enabled.
+ */
+ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+ priv->rx_obj_num_coalesce_irq)) {
+ val = MCP251XFD_REG_FIFOCON_UINC |
+ MCP251XFD_REG_FIFOCON_RXOVIE;
+
+ if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+ val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+ else if (priv->rx_obj_num_coalesce_irq)
+ val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &rx_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+ }
+}
+
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+ int err = 0, i;
+
+ netdev_reset_queue(priv->ndev);
+
+ mcp251xfd_ring_init_tef(priv, &base);
+ mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
+ mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
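+ /* RAM layout (illustrative): the TEF objects come first,
+ * followed by the RX rings and then the TX ring; "base" now
+ * points behind the last allocated object and is used below
+ * to compute the used and free RAM.
+ */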
+
+ /* mcp251xfd_handle_rxif() will iterate over all RX rings.
+ * Rings with their corresponding bit set in
+ * priv->regs_status.rxif are read out.
+ *
+ * If the chip is configured for only 1 RX-FIFO, and if there
+ * is an RX interrupt pending (RXIF in INT register is set),
+ * it must be the 1st RX-FIFO.
+ *
+ * We mark the RXIF of the 1st FIFO as pending here, so that
+ * we can skip the read of the RXIF register in
+ * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
+ *
+ * If we use more than 1 RX-FIFO, this value gets overwritten
+ * in mcp251xfd_read_regs_status(), so set it unconditionally
+ * here.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
+
+ if (priv->tx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx_obj_num_coalesce_irq *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+ priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+ }
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+ priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+ if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+ continue;
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2u*%u bytes = %4u bytes\n",
+ mcp251xfd_get_rx_obj_addr(rx_ring,
+ priv->rx_obj_num_coalesce_irq),
+ rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+ rx_ring->obj_size,
+ (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+ rx_ring->obj_size);
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_num * rx_ring->obj_size);
+ }
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ priv->tx->fifo_nr,
+ mcp251xfd_get_tx_obj_addr(priv->tx, 0),
+ priv->tx->obj_num, priv->tx->obj_size,
+ priv->tx->obj_num * priv->tx->obj_size);
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %4d bytes\n",
+ MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
+
+ ram_used = base - MCP251XFD_RAM_START;
+ if (ram_used > MCP251XFD_RAM_SIZE) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+ err = -ENOMEM;
+ }
+
+ if (priv->tx_obj_num_coalesce_irq &&
+ priv->tx_obj_num_coalesce_irq * 2 != priv->tx->obj_num) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, number of TEF coalescing buffers (%u) must be half of TEF buffers (%u).\n",
+ priv->tx_obj_num_coalesce_irq, priv->tx->obj_num);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ rx_irq_timer);
+ struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ tx_irq_timer);
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+ .rx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+ .min = MCP251XFD_RX_OBJ_NUM_MIN,
+ .max = MCP251XFD_RX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+ .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+ .fifo_num = MCP251XFD_FIFO_RX_NUM,
+ .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .tx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+ .min = MCP251XFD_TX_OBJ_NUM_MIN,
+ .max = MCP251XFD_TX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+ .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+ .fifo_num = MCP251XFD_FIFO_TX_NUM,
+ .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .size = MCP251XFD_RAM_SIZE,
+ .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+{
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_rx_ring *rx_ring;
+ u8 tx_obj_size, rx_obj_size;
+ u8 rem, i;
+
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ const struct ethtool_coalesce ec = {
+ .rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq,
+ .rx_max_coalesced_frames_irq = priv->rx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->rx_obj_num_coalesce_irq,
+ .tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq,
+ .tx_max_coalesced_frames_irq = priv->tx_obj_num_coalesce_irq == 0 ?
+ 1 : priv->tx_obj_num_coalesce_irq,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, &ec, fd_mode);
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+
+ tx_ring->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ }
+
+ if (fd_mode) {
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ } else {
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ }
+
+ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
+ ilog2(tx_ring->obj_num);
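+ /* e.g. obj_num == 8: shift == BITS_PER_TYPE(u8) - ilog2(8) ==
+ * 8 - 3 == 5. Shifting a ring index left by 5 scales it to the
+ * full u8 range, so head/tail differences wrap correctly, see
+ * mcp251xfd_get_tef_len() and mcp251xfd_get_rx_len().
+ */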
+ tx_ring->obj_size = tx_obj_size;
+
+ rem = priv->rx_obj_num;
+ for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+ u8 rx_obj_num;
+
+ if (i == 0 && priv->rx_obj_num_coalesce_irq)
+ rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+ MCP251XFD_FIFO_DEPTH);
+ else
+ rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+ MCP251XFD_FIFO_DEPTH);
+ rem -= rx_obj_num;
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp251xfd_ring_free(priv);
+ return -ENOMEM;
+ }
+
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
+ ilog2(rx_obj_num);
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+ }
+ priv->rx_ring_num = i;
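+ /* Illustrative example: rx_obj_num == 48 without coalescing
+ * yields two rings: the first gets rounddown_pow_of_two(48) ==
+ * 32 objects, the second the remaining 16.
+ */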
+
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
new file mode 100644
index 000000000000..fe897f3e4c12
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
+{
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
+
+static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *len_p)
+{
+ const u8 shift = ring->obj_num_shift_to_u8;
+ u8 chip_head, tail, len;
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
+ *len_p = 0;
+ return 0;
+ }
+
+ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
+ *len_p = ring->obj_num;
+ return 0;
+ }
+
+ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_rx_tail(priv, ring);
+ if (err)
+ return err;
+ tail = mcp251xfd_get_rx_tail(ring);
+
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference stable across the u8
+ * overflow. The right shift acts on len, which is a u8.
+ */
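+ /* Worked example (illustrative): obj_num == 16, i.e. shift ==
+ * 8 - ilog2(16) == 4. For chip_head == 2 and tail == 14:
+ * (2 << 4) - (14 << 4) == -192, which is 64 as a u8, and
+ * 64 >> 4 == 4 == (2 - 14) mod 16 pending objects.
+ */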
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
+
+ len = (chip_head << shift) - (tail << shift);
+ *len_p = len >> shift;
+
+ return 0;
+}
+
+static void
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ cfd->len = can_fd_dlc2len(dlc);
+ } else {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ can_frame_set_cc_len((struct can_frame *)cfd, dlc,
+ priv->can.ctrlmode);
+ }
+
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+}
+
+static int
+mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ u64 timestamp;
+ int err;
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the RX FIFO head index)
+ * might be corrupted, in which case we might process past the
+ * RX FIFO's head into old CAN frames.
+ *
+ * Compare the timestamp of the currently processed CAN frame
+ * with that of the last valid frame received. Abort with
+ * -EBADMSG if an old CAN frame is detected.
+ */
+ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
+ if (timestamp <= ring->last_valid) {
+ stats->rx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+ ring->last_valid = timestamp;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp251xfd_skb_set_timestamp(skb, timestamp);
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+ int err;
+
+ err = regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / val_bytes);
+
+ return err;
+}
+
+static int
+mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ u8 len)
+{
+ int offset;
+ int err;
+
+ if (!len)
+ return 0;
+
+ ring->head += len;
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
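+ /* e.g. with ARRAY_SIZE(ring->uinc_xfer) == 32 and len == 3:
+ * offset == 29, so transfers 29..31 are sent and the last one
+ * is the transfer with "cs_change == 0".
+ */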
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_get_rx_len(priv, ring, &len);
+ if (err)
+ return err;
+
+ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, l);
+ if (err)
+ return err;
+
+ for (i = 0; i < l; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+
+ /* -EBADMSG means we're affected by mcp2518fd
+ * erratum DS80000789E 6., i.e. the timestamp
+ * in the RX object is older than that of the last
+ * valid received CAN frame. Don't process any further
+ * objects and mark the already processed frames as good.
+ */
+ if (err == -EBADMSG)
+ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
+ else if (err)
+ return err;
+ }
+
+ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
+ if (err)
+ return err;
+
+ len -= l;
+ }
+
+ return 0;
+}
+
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ /* - if RX IRQ coalescing is active always handle ring 0
+ * - only handle rings if RX IRQ is active
+ */
+ if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+ !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+ continue;
+
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ if (priv->rx_coalesce_usecs_irq)
+ hrtimer_start(&priv->rx_irq_timer,
+ ns_to_ktime(priv->rx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
new file mode 100644
index 000000000000..e94321849fd7
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
+}
+
+static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF;
+}
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, tef_tail_masked, tef_tail;
+ struct sk_buff *skb;
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, this is enough to detect old TEF objects.
+ */
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
+ hw_tef_obj->flags);
+ tef_tail_masked = priv->tef->tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the TX FIFO tail index)
+ * might be corrupted, in which case we might process past the
+ * TEF FIFO's head into old CAN frames.
+ *
+ * Compare the sequence number of the currently processed CAN
+ * frame with the expected sequence number. Abort with
+ * -EBADMSG if an old CAN frame is detected.
+ */
+ if (seq != tef_tail_masked) {
+ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
+ seq, tef_tail_masked);
+ stats->tx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+ frame_len_ptr);
+ stats->tx_packets++;
+ priv->tef->tail++;
+
+ return 0;
+}
+
+static int
+mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const u8 shift = tx_ring->obj_num_shift_to_u8;
+ u8 chip_tx_tail, tail, len;
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ /* If the chip says the TX-FIFO is empty and there are no free
+ * TX buffers in the ring, all frames have been sent; the TEF
+ * then holds a complete ring's worth of objects.
+ */
+ if (mcp251xfd_tx_fifo_sta_empty(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0) {
+ *len_p = tx_ring->obj_num;
+ return 0;
+ }
+
+ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ tail = mcp251xfd_get_tef_tail(priv);
+
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference stable across the u8
+ * overflow. The right shift acts on len, which is a u8.
+ */
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
+
+ len = (chip_tx_tail << shift) - (tail << shift);
+ len >>= shift;
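+ /* e.g. obj_num == 8 (shift == 5): chip_tx_tail == 1 and
+ * tail == 7 give ((1 << 5) - (7 << 5)) == 64 as a u8, and
+ * 64 >> 5 == 2 == (1 - 7) mod 8 pending TEF objects.
+ */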
+
+ /* According to mcp2518fd erratum DS80000789E 6., the FIFOCI
+ * bits of a FIFOSTA register (here: the TX-FIFO tail index)
+ * might be corrupted.
+ *
+ * However, here it seems that the bit indicating an empty
+ * TX-FIFO (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct,
+ * while the TX-FIFO tail index is.
+ *
+ * We assume the TX-FIFO is empty, i.e. all pending CAN frames
+ * have been sent, if:
+ * - Chip's head and tail index are equal (len == 0).
+ * - The TX-FIFO is less than half full.
+ * (The TX-FIFO empty case has already been checked at the
+ * beginning of this function.)
+ * - No free buffers in the TX ring.
+ */
+ if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) &&
+ mcp251xfd_get_tx_free(tx_ring) == 0)
+ len = tx_ring->obj_num;
+
+ *len_p = len;
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / val_bytes * len);
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_get_tef_len(priv, &len);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ l = mcp251xfd_get_tef_linear_len(priv, len);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
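+ /* Illustrative example: obj_num == 8, tef_tail == 6, len == 4:
+ * the first read fetches objects 6..7 (l == 2), the second
+ * wraps around and fetches objects 0..1.
+ */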
+
+ for (i = 0; i < len; i++) {
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+ /* -EBADMSG means we're affected by mcp2518fd erratum
+ * DS80000789E 6., i.e. the Sequence Number in the TEF
+ * doesn't match our tef_tail. Don't process any
+ * further objects and mark the already processed frames as good.
+ */
+ if (err == -EBADMSG)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+
+ total_frame_len += frame_len;
+ }
+
+out_netif_wake_queue:
+ len = i; /* number of successfully handled TEF objects */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int offset;
+
+ ring->head += len;
+
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ }
+
+ mcp251xfd_ecc_tefif_successful(priv);
+
+ if (mcp251xfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ if (priv->tx_coalesce_usecs_irq)
+ hrtimer_start(&priv->tx_irq_timer,
+ ns_to_ktime(priv->tx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 712e09186987..413a5cb75c13 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2021 Pengutronix,
+// Copyright (c) 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
@@ -11,20 +11,20 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
- u32 timestamp = 0;
+ u32 ts_raw = 0;
int err;
priv = container_of(cc, struct mcp251xfd_priv, cc);
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
netdev_err(priv->ndev,
"Error %d while reading timestamp. HW timestamps may be inaccurate.",
err);
- return timestamp;
+ return ts_raw;
}
static void mcp251xfd_timestamp_work(struct work_struct *work)
@@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp)
-{
- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
- u64 ns;
-
- ns = timecounter_cyc2time(&priv->tc, timestamp);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
{
struct cyclecounter *cc = &priv->cc;
- cc->read = mcp251xfd_timestamp_read;
+ cc->read = mcp251xfd_timestamp_raw_read;
cc->mask = CYCLECOUNTER_MASK(32);
cc->shift = 1;
cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
-
INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+}
+
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
schedule_delayed_work(&priv->timestamp,
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
new file mode 100644
index 000000000000..747ae3e8a768
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/unaligned.h>
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline struct mcp251xfd_tx_obj *
+mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp251xfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int len_sanitized = 0, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP251XFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+ /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
+ * harm, only the lower 7 bits will be transferred into the
+ * TEF object.
+ */
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP251XFD_OBJ_FLAGS_RTR;
+ else
+ len_sanitized = canfd_sanitize_len(cfd->len);
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP251XFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP251XFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP251XFD_OBJ_FLAGS_BRS;
+
+ dlc = can_fd_len2dlc(cfd->len);
+ } else {
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ priv->can.ctrlmode);
+ }
+
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Copy data */
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+
+ /* Clear unused data at end of CAN frame */
+ if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
+ int pad_len;
+
+ pad_len = len_sanitized - cfd->len;
+ if (pad_len)
+ memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
+ }
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP251XFD_SANITIZE_CAN)
+ len += round_up(len_sanitized, sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
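+ /* Illustrative example: a CAN-FD frame with cfd->len == 10 is
+ * sanitized to len_sanitized == 12 (the next valid CAN-FD
+ * length), so 2 padding bytes are zeroed above and len ==
+ * 8 + round_up(12, 4) == 20 bytes go into the controller's RAM
+ * (before the optional CRC below).
+ */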
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
+
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
+static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
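+/* Classic stop-recheck-restart pattern: stop the queue, issue a
+ * memory barrier, then re-check the free count. If a TX slot was
+ * freed in the meantime (e.g. by the TEF handler), restart the
+ * queue instead of reporting busy.
+ */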
+static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring)
+{
+ if (mcp251xfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (mcp251xfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
+
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
+ u8 tx_head;
+ int err;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+ mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (mcp251xfd_get_tx_free(tx_ring) == 0)
+ netif_stop_queue(ndev);
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
+
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 0f322dabaf65..085d7101e595 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,18 +2,20 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019 Pengutronix,
- * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
#ifndef _MCP251XFD_H
#define _MCP251XFD_H
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
@@ -334,13 +336,19 @@
#define MCP251XFD_REG_IOCON_TXCANOD BIT(28)
#define MCP251XFD_REG_IOCON_PM1 BIT(25)
#define MCP251XFD_REG_IOCON_PM0 BIT(24)
+#define MCP251XFD_REG_IOCON_PM(n) (MCP251XFD_REG_IOCON_PM0 << (n))
#define MCP251XFD_REG_IOCON_GPIO1 BIT(17)
#define MCP251XFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP251XFD_REG_IOCON_GPIO(n) (MCP251XFD_REG_IOCON_GPIO0 << (n))
+#define MCP251XFD_REG_IOCON_GPIO_MASK GENMASK(17, 16)
#define MCP251XFD_REG_IOCON_LAT1 BIT(9)
#define MCP251XFD_REG_IOCON_LAT0 BIT(8)
+#define MCP251XFD_REG_IOCON_LAT(n) (MCP251XFD_REG_IOCON_LAT0 << (n))
+#define MCP251XFD_REG_IOCON_LAT_MASK GENMASK(9, 8)
#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6)
#define MCP251XFD_REG_IOCON_TRIS1 BIT(1)
#define MCP251XFD_REG_IOCON_TRIS0 BIT(0)
+#define MCP251XFD_REG_IOCON_TRIS(n) (MCP251XFD_REG_IOCON_TRIS0 << (n))
#define MCP251XFD_REG_CRC 0xe08
#define MCP251XFD_REG_CRC_FERRIE BIT(25)
@@ -366,25 +374,6 @@
#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-#define MCP251XFD_TX_FIFO 1
-#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
-
/* SPI commands */
#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -405,12 +394,39 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
#define MCP251XFD_POLL_SLEEP_US (10)
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP251XFD_FRAME_LEN_MAX_BITS (736)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
#define MCP251XFD_READ_CRC_RETRIES_MAX 3
#define MCP251XFD_ECC_CNT_MAX 2
#define MCP251XFD_SANITIZE_SPI 1
#define MCP251XFD_SANITIZE_CAN 1
+/* FIFO and Ring */
+#define MCP251XFD_FIFO_TEF_NUM 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
+#define MCP251XFD_FIFO_TX_NUM 1U
+
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
+static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
+static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
+
/* Silence TX MAB overflow warnings */
#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
/* Use CRC to access registers */
@@ -433,7 +449,7 @@ struct mcp251xfd_hw_tef_obj {
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
@@ -496,6 +512,11 @@ union mcp251xfd_write_reg_buf {
u8 data[4];
__be16 crc;
} crc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[1];
+ __be16 crc;
+ } safe;
} ____cacheline_aligned;
struct mcp251xfd_tx_obj {
@@ -510,8 +531,14 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
+
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
union mcp251xfd_write_reg_buf uinc_buf;
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
};
@@ -520,7 +547,10 @@ struct mcp251xfd_tx_ring {
unsigned int tail;
u16 base;
+ u8 nr;
+ u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
@@ -531,14 +561,23 @@ struct mcp251xfd_rx_ring {
unsigned int head;
unsigned int tail;
+ /* timestamp of the last valid received CAN frame */
+ u64 last_valid;
+
u16 base;
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
- struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -560,12 +599,14 @@ struct mcp251xfd_ecc {
struct mcp251xfd_regs_status {
u32 intf;
+ u32 rxif;
};
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -573,6 +614,13 @@ struct mcp251xfd_devtype_data {
u32 quirks;
};
+enum mcp251xfd_flags {
+ MCP251XFD_FLAGS_DOWN,
+ MCP251XFD_FLAGS_FD_MODE,
+
+ __MCP251XFD_FLAGS_SIZE__
+};
+
struct mcp251xfd_priv {
struct can_priv can;
struct can_rx_offload offload;
@@ -591,12 +639,28 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
+ u32 spi_max_speed_hz_fast;
+ u32 spi_max_speed_hz_slow;
- struct mcp251xfd_tef_ring tef[1];
- struct mcp251xfd_tx_ring tx[1];
- struct mcp251xfd_rx_ring *rx[1];
+ struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
+ u8 rx_obj_num;
+ u8 rx_obj_num_coalesce_irq;
+ u8 tx_obj_num_coalesce_irq;
+
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_coalesce_usecs_irq;
+ struct hrtimer rx_irq_timer;
+ struct hrtimer tx_irq_timer;
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
@@ -607,23 +671,32 @@ struct mcp251xfd_priv {
struct gpio_desc *rx_int;
struct clk *clk;
+ bool pll_enable;
struct regulator *reg_vdd;
struct regulator *reg_xceiver;
struct mcp251xfd_devtype_data devtype_data;
struct can_berr_counter bec;
+ struct gpio_chip gc;
};
#define MCP251XFD_IS(_model) \
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
+
+static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
+{
+ /* listen-only mode works like FD mode */
+ return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD);
+}
static inline u8 mcp251xfd_first_byte_set(u32 mask)
{
@@ -710,6 +783,13 @@ mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
}
static inline void
+mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr);
+}
+
+static inline void
mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
u16 addr, u16 len)
{
@@ -720,14 +800,20 @@ mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
static inline u8 *
mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
- u16 addr)
+ u16 addr, u8 len)
{
u8 *data;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
- addr);
- data = write_reg_buf->crc.data;
+ if (len == 1) {
+ mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd,
+ addr);
+ data = write_reg_buf->safe.data;
+ } else {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ }
} else {
mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
addr);
@@ -737,10 +823,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
+static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
+ u32 *ts_raw)
{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
+}
+
+static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static inline
+void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 ts_raw)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, ts_raw);
+ mcp251xfd_skb_set_timestamp(skb, ns);
}
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
@@ -761,6 +864,24 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
return ring->base + ring->obj_size * n;
}
+static inline int
+mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
{
return priv->tef->head & (priv->tx->obj_num - 1);
@@ -771,17 +892,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
return priv->tef->tail & (priv->tx->obj_num - 1);
}
-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
-{
- return priv->tef->head - priv->tef->tail;
-}
-
-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_tef_len(priv);
-
return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
}
@@ -824,18 +936,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
-{
- return ring->head - ring->tail;
-}
-
static inline u8
-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_rx_len(ring);
-
return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
}
@@ -849,15 +952,26 @@ mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
(n) < (priv)->rx_ring_num; \
(n)++, (ring) = *((priv)->rx + (n)))
-int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
#if IS_ENABLED(CONFIG_DEV_COREDUMP)
void mcp251xfd_dump(const struct mcp251xfd_priv *priv);
#else
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 54aa7c25c4de..af52285d5a4e 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -51,16 +51,16 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#define DRV_NAME "sun4i_can"
@@ -90,6 +90,8 @@
#define SUN4I_REG_BUF12_ADDR 0x0070 /* CAN Tx/Rx Buffer 12 */
#define SUN4I_REG_ACPC_ADDR 0x0040 /* CAN Acceptance Code 0 */
#define SUN4I_REG_ACPM_ADDR 0x0044 /* CAN Acceptance Mask 0 */
+#define SUN4I_REG_ACPC_ADDR_D1 0x0028 /* CAN Acceptance Code 0 on the D1 */
+#define SUN4I_REG_ACPM_ADDR_D1 0x002C /* CAN Acceptance Mask 0 on the D1 */
#define SUN4I_REG_RBUF_RBACK_START_ADDR 0x0180 /* CAN transmit buffer start */
#define SUN4I_REG_RBUF_RBACK_END_ADDR 0x01b0 /* CAN transmit buffer end */
@@ -200,11 +202,24 @@
#define SUN4I_CAN_MAX_IRQ 20
#define SUN4I_MODE_MAX_RETRIES 100
+/**
+ * struct sun4ican_quirks - Differences between SoC variants.
+ *
+ * @has_reset: SoC needs reset deasserted.
+ * @acp_offset: Offset of the ACPC and ACPM registers.
+ */
+struct sun4ican_quirks {
+ bool has_reset;
+ int acp_offset;
+};
+
struct sun4ican_priv {
struct can_priv can;
void __iomem *base;
struct clk *clk;
+ struct reset_control *reset;
spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
+ int acp_offset;
};
static const struct can_bittiming_const sun4ican_bittiming_const = {
@@ -327,8 +342,8 @@ static int sun4i_can_start(struct net_device *dev)
}
/* set filters - we accept all */
- writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR);
- writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR);
+ writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset);
+ writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset);
/* clear error counters and error code capture */
writel(0, priv->base + SUN4I_REG_ERRC_ADDR);
@@ -418,7 +433,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
canid_t id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -490,21 +505,21 @@ static void sun4i_can_rx(struct net_device *dev)
}
/* remote frame ? */
- if (fi & SUN4I_MSG_RTR_FLAG)
+ if (fi & SUN4I_MSG_RTR_FLAG) {
id |= CAN_RTR_FLAG;
- else
+ } else {
for (i = 0; i < cf->len; i++)
cf->data[i] = readl(priv->base + dreg + i * 4);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
cf->can_id = id;
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
@@ -525,11 +540,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -560,15 +570,18 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
priv->can.can_stats.bus_error++;
- stats->rx_errors++;
+ ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
if (likely(skb)) {
- ecc = readl(priv->base + SUN4I_REG_STA_ADDR);
-
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SUN4I_STA_MASK_ERR) {
@@ -586,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
>> 16;
break;
}
- /* error occurred during transmission? */
- if ((ecc & SUN4I_STA_ERR_DIR) == 0)
+ }
+
+ /* error occurred during transmission? */
+ if ((ecc & SUN4I_STA_ERR_DIR) == 0) {
+ if (likely(skb))
cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
}
}
if (isrc & SUN4I_INT_ERR_PASSIVE) {
@@ -614,21 +633,18 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
tx_state = txerr >= rxerr ? state : 0;
rx_state = txerr <= rxerr ? state : 0;
- if (likely(skb))
- can_change_state(dev, cf, tx_state, rx_state);
- else
- priv->can.state = state;
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ can_change_state(dev, cf, tx_state, rx_state);
if (state == CAN_STATE_BUS_OFF)
can_bus_off(dev);
}
- if (likely(skb)) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (likely(skb))
netif_rx(skb);
- } else {
+ else
return -ENOMEM;
- }
return 0;
}
@@ -641,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;
- while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
- (n < SUN4I_CAN_MAX_IRQ)) {
+ while ((n < SUN4I_CAN_MAX_IRQ) &&
+ (isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
@@ -651,13 +667,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
if (isrc & SUN4I_INT_TBUF_VLD) {
/* transmission complete interrupt */
- stats->tx_bytes +=
- readl(priv->base +
- SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if ((isrc & SUN4I_INT_RBUF_VLD) &&
!(isrc & SUN4I_INT_DATA_OR)) {
@@ -702,6 +714,13 @@ static int sun4ican_open(struct net_device *dev)
goto exit_irq;
}
+ /* software reset deassert */
+ err = reset_control_deassert(priv->reset);
+ if (err) {
+ netdev_err(dev, "could not deassert CAN reset\n");
+ goto exit_soft_reset;
+ }
+
/* turn on clocking for CAN peripheral block */
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -715,7 +734,6 @@ static int sun4ican_open(struct net_device *dev)
goto exit_can_start;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
netif_start_queue(dev);
return 0;
@@ -723,6 +741,8 @@ static int sun4ican_open(struct net_device *dev)
exit_can_start:
clk_disable_unprepare(priv->clk);
exit_clock:
+ reset_control_assert(priv->reset);
+exit_soft_reset:
free_irq(dev->irq, dev);
exit_irq:
close_candev(dev);
@@ -736,10 +756,10 @@ static int sun4ican_close(struct net_device *dev)
netif_stop_queue(dev);
sun4i_can_stop(dev);
clk_disable_unprepare(priv->clk);
+ reset_control_assert(priv->reset);
free_irq(dev->irq, dev);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
return 0;
}
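
For the reset handling added above, the ordering matters: the open path releases the reset line before enabling the clock, and the close path reverses that. A minimal sketch of the pattern, assuming a hypothetical priv struct holding the clk and reset handles:

#include <linux/clk.h>
#include <linux/reset.h>

struct foo_priv {
	struct clk *clk;
	struct reset_control *reset;
};

static int foo_power_on(struct foo_priv *priv)
{
	int err;

	err = reset_control_deassert(priv->reset);	/* leave reset first */
	if (err)
		return err;

	err = clk_prepare_enable(priv->clk);		/* then gate the clock on */
	if (err)
		reset_control_assert(priv->reset);	/* unwind on failure */

	return err;
}

static void foo_power_off(struct foo_priv *priv)
{
	clk_disable_unprepare(priv->clk);	/* clock off first */
	reset_control_assert(priv->reset);	/* then back into reset */
}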
@@ -750,31 +770,79 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct ethtool_ops sun4ican_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_a10 = {
+ .has_reset = false,
+ .acp_offset = 0,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_r40 = {
+ .has_reset = true,
+ .acp_offset = 0,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_d1 = {
+ .has_reset = true,
+ .acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR),
+};
+
static const struct of_device_id sun4ican_of_match[] = {
- {.compatible = "allwinner,sun4i-a10-can"},
- {},
+ {
+ .compatible = "allwinner,sun4i-a10-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun7i-a20-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun8i-r40-can",
+ .data = &sun4ican_quirks_r40
+ }, {
+ .compatible = "allwinner,sun20i-d1-can",
+ .data = &sun4ican_quirks_d1
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, sun4ican_of_match);
-static int sun4ican_remove(struct platform_device *pdev)
+static void sun4ican_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
free_candev(dev);
-
- return 0;
}
static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct clk *clk;
+ struct reset_control *reset = NULL;
void __iomem *addr;
int err, irq;
struct net_device *dev;
struct sun4ican_priv *priv;
+ const struct sun4ican_quirks *quirks;
+
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (!quirks) {
+ dev_err(&pdev->dev, "failed to determine the quirks to use\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (quirks->has_reset) {
+ reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(reset)) {
+ dev_err(&pdev->dev, "unable to request reset\n");
+ err = PTR_ERR(reset);
+ goto exit;
+ }
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
@@ -804,6 +872,7 @@ static int sun4ican_probe(struct platform_device *pdev)
}
dev->netdev_ops = &sun4ican_netdev_ops;
+ dev->ethtool_ops = &sun4ican_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -818,6 +887,8 @@ static int sun4ican_probe(struct platform_device *pdev)
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
+ priv->reset = reset;
+ priv->acp_offset = quirks->acp_offset;
spin_lock_init(&priv->cmdreg_lock);
platform_set_drvdata(pdev, dev);
@@ -829,7 +900,6 @@ static int sun4ican_probe(struct platform_device *pdev)
DRV_NAME, err);
goto exit_free;
}
- devm_can_led_init(dev);
dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n",
priv->base, dev->irq);
@@ -856,4 +926,4 @@ module_platform_driver(sun4i_can_driver);
MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)");
+MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)");
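
The variant handling above is an instance of a common pattern for drivers that span several SoC generations: per-variant differences live in a const quirks struct hung off the OF match table and are fetched once in probe. A stripped-down sketch with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_quirks {
	bool has_reset;
	int reg_offset;
};

static const struct foo_quirks foo_quirks_v1 = { .has_reset = false, .reg_offset = 0 };
static const struct foo_quirks foo_quirks_v2 = { .has_reset = true, .reg_offset = 0x40 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_quirks_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_quirks_v2 },
	{ /* sentinel */ },
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_quirks *quirks;

	quirks = of_device_get_match_data(&pdev->dev);
	if (!quirks)
		return -ENODEV;

	/* quirks->reg_offset is then added to the affected register accesses */
	return 0;
}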
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 353062ead98f..1d3dbf28b105 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI HECC (CAN) device driver
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -23,18 +14,17 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
@@ -393,7 +383,7 @@ static void ti_hecc_start(struct net_device *ndev)
* overflows instead of the hardware silently dropping the
* messages.
*/
- mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
+ mbx_mask = ~BIT_U32(HECC_RX_LAST_MBOX);
hecc_write(priv, HECC_CANOPC, mbx_mask);
/* Enable interrupts */
@@ -479,7 +469,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
mbxno = get_tx_head_mb(priv);
@@ -633,8 +623,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
- timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
+ timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
@@ -663,12 +653,13 @@ static void ti_hecc_change_state(struct net_device *ndev,
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = hecc_read(priv, HECC_CANTEC);
cf->data[7] = hecc_read(priv, HECC_CANREC);
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
@@ -756,10 +747,9 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
stamp = hecc_read_stamp(priv, mbxno);
stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload,
- mbxno, stamp, NULL);
+ can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
+ mbxno, stamp, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
}
@@ -814,8 +804,6 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
-
ti_hecc_start(ndev);
can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
@@ -834,8 +822,6 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -843,7 +829,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_open = ti_hecc_open,
.ndo_stop = ti_hecc_close,
.ndo_start_xmit = ti_hecc_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ti_hecc_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static const struct of_device_id ti_hecc_dt_ids[] = {
@@ -859,7 +848,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -904,9 +892,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No irq resource\n");
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
goto probe_exit_candev;
}
@@ -920,11 +908,11 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
spin_lock_init(&priv->mbx_lock);
- ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &ti_hecc_netdev_ops;
+ ndev->ethtool_ops = &ti_hecc_ethtool_ops;
priv->clk = clk_get(&pdev->dev, "hecc_ck");
if (IS_ERR(priv->clk)) {
@@ -956,8 +944,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_offload;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32)ndev->irq);
@@ -975,7 +961,7 @@ probe_exit_candev:
return err;
}
-static int ti_hecc_remove(struct platform_device *pdev)
+static void ti_hecc_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(ndev);
@@ -985,8 +971,6 @@ static int ti_hecc_remove(struct platform_device *pdev)
clk_put(priv->clk);
can_rx_offload_del(&priv->offload);
free_candev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
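
The IRQ lookup conversion in ti_hecc_probe() follows the preferred form: platform_get_irq() returns a negative errno on failure (including -EPROBE_DEFER), which can be propagated directly instead of poking at the IORESOURCE_IRQ resource by hand. A minimal sketch of that shape, with an illustrative probe function:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and friends */

	/* request_irq(irq, ...) follows once the device is set up */
	return 0;
}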
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index f959215c9d53..cf65a90816b9 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -14,15 +14,24 @@ config CAN_EMS_USB
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_ESD_USB2
- tristate "ESD USB/2 CAN/USB interface"
+config CAN_ESD_USB
+ tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver supports the CAN-USB/2 interface
- from esd electronic system design gmbh (http://www.esd.eu).
+ This driver adds support for several CAN/USB interfaces
+ from esd electronics gmbh (https://www.esd.eu).
+
+ The driver supports the following devices:
+ - esd CAN-USB/2
+ - esd CAN-USB/3-FD
+ - esd CAN-USB/Micro
+
+ To compile this driver as a module, choose M here: the module
+ will be called esd_usb.
config CAN_ETAS_ES58X
tristate "ETAS ES58X CAN/USB interfaces"
select CRC16
+ select NET_DEVLINK
help
This driver supports the ES581.4, ES582.1 and ES584.1 interfaces
from ETAS GmbH (https://www.etas.com/en/products/es58x.php).
@@ -30,17 +39,34 @@ config CAN_ETAS_ES58X
To compile this driver as a module, choose M here: the module
will be called etas_es58x.
+config CAN_F81604
+ tristate "Fintek F81604 USB to 2CAN interface"
+ help
+ This driver supports the Fintek F81604 USB to 2CAN interface.
+ The device supports the CAN 2.0A/B protocol and provides
+ 2 output pins to control an external terminator (optional).
+
+ To compile this driver as a module, choose M here: the module will
+ be called f81604.
+
+ (see also https://www.fintek.com.tw).
+
config CAN_GS_USB
- tristate "Geschwister Schneider UG interfaces"
+ tristate "Geschwister Schneider UG and candleLight compatible interfaces"
+ select CAN_RX_OFFLOAD
help
- This driver supports the Geschwister Schneider and bytewerk.org
- candleLight USB CAN interfaces USB/CAN devices
+ This driver supports the Geschwister Schneider and
+ bytewerk.org candleLight compatible
+ (https://github.com/candle-usb/candleLight_fw) USB/CAN
+ interfaces.
+
If unsure, choose N;
choose Y for built-in support,
or M to compile as a module (module will be named: gs_usb).
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
+ select NET_DEVLINK
help
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS.
@@ -67,6 +93,7 @@ config CAN_KVASER_USB
- Kvaser Leaf Light R v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
+ - Kvaser Mini PCIe 1xCAN
- Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
@@ -76,6 +103,7 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Leaf v3
- Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
- Kvaser Hybrid Pro CAN/LIN
@@ -86,12 +114,14 @@ config CAN_KVASER_USB
- Kvaser USBcan Light 4xHS
- Kvaser USBcan Pro 2xHS v2
- Kvaser USBcan Pro 4xHS
+ - Kvaser USBcan Pro 5xCAN
- Kvaser USBcan Pro 5xHS
- Kvaser U100
- Kvaser U100P
- Kvaser U100S
- ATI Memorator Pro 2xHS v2
- ATI USBcan Pro 2xHS v2
+ - Vining 800
If unsure, say N.
@@ -104,6 +134,17 @@ config CAN_MCBA_USB
This driver supports the CAN BUS Analyzer interface
from Microchip (http://www.microchip.com/development-tools/).
+config CAN_NCT6694
+ tristate "Nuvoton NCT6694 Socket CANfd support"
+ depends on MFD_NCT6694
+ select CAN_RX_OFFLOAD
+ help
+ If you say yes to this option, support will be included for the
+ Nuvoton NCT6694, a USB device that provides a CAN FD controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called nct6694_canfd.
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 748cf31a0d53..fcafb1ac262e 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -5,10 +5,12 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
-obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
+obj-$(CONFIG_CAN_F81604) += f81604.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_NCT6694) += nct6694_canfd.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2b5302e72435..de8e212a1366 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -194,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
@@ -230,7 +231,6 @@ struct ems_tx_urb_context {
struct ems_usb *dev;
u32 echo_index;
- u8 dlc;
};
struct ems_usb {
@@ -320,10 +320,11 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.can_msg.msg[i];
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -334,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
struct net_device_stats *stats = &dev->netdev->stats;
skb = alloc_can_err_skb(dev->netdev, &cf);
- if (skb == NULL)
- return;
if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
u8 state = msg->msg.can_state;
if (state & SJA1000_SR_BS) {
dev->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
dev->can.can_stats.bus_off++;
can_bus_off(dev->netdev);
@@ -360,46 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
/* bus error interrupt */
dev->can.can_stats.bus_error++;
- stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
- switch (ecc & SJA1000_ECC_MASK) {
- case SJA1000_ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case SJA1000_ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case SJA1000_ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
- break;
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
}
/* Error occurred during transmission? */
- if ((ecc & SJA1000_ECC_DIR) == 0)
- cf->data[2] |= CAN_ERR_PROT_TX;
+ if ((ecc & SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
- if (dev->can.state == CAN_STATE_ERROR_WARNING ||
- dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE)) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
} else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
stats->rx_over_errors++;
stats->rx_errors++;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
+ if (skb)
+ netif_rx(skb);
}
/*
@@ -518,9 +525,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
/* transmission complete interrupt */
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -749,7 +755,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -806,7 +812,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
context->dev = dev;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
size, ems_usb_write_bulk_callback, context);
@@ -823,7 +828,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
- dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
@@ -881,11 +885,14 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_open = ems_usb_open,
.ndo_stop = ems_usb_close,
.ndo_start_xmit = ems_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ems_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static const struct can_bittiming_const ems_usb_bittiming_const = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -995,6 +1002,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
+ netdev->ethtool_ops = &ems_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -1079,7 +1087,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.probe = ems_usb_probe,
.disconnect = ems_usb_disconnect,
.id_table = ems_usb_table,
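
The ems_usb statistics changes above mirror a tree-wide cleanup: can_get_echo_skb() returns the payload length of the echoed frame, so drivers no longer need to cache the DLC in their tx context. A minimal completion-handler sketch (hypothetical driver, not ems_usb itself):

#include <linux/can/skb.h>
#include <linux/netdevice.h>

static void foo_tx_done(struct net_device *netdev, unsigned int echo_index)
{
	/* can_get_echo_skb() hands the echo skb back to the stack and
	 * returns its payload length for the byte counter.
	 */
	netdev->stats.tx_bytes += can_get_echo_skb(netdev, echo_index, NULL);
	netdev->stats.tx_packets++;
}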
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
new file mode 100644
index 000000000000..08da507faef4
--- /dev/null
+++ b/drivers/net/can/usb/esd_usb.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
+ *
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ */
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+#include <linux/usb.h>
+
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* USB vendor and product ID */
+#define ESD_USB_ESDGMBH_VENDOR_ID 0x0ab4
+#define ESD_USB_CANUSB2_PRODUCT_ID 0x0010
+#define ESD_USB_CANUSBM_PRODUCT_ID 0x0011
+#define ESD_USB_CANUSB3_PRODUCT_ID 0x0014
+
+/* CAN controller clock frequencies */
+#define ESD_USB_2_CAN_CLOCK (60 * MEGA) /* Hz */
+#define ESD_USB_M_CAN_CLOCK (36 * MEGA) /* Hz */
+#define ESD_USB_3_CAN_CLOCK (80 * MEGA) /* Hz */
+
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
+#define ESD_USB_CMD_VERSION 1 /* also used for VERSION_REPLY */
+#define ESD_USB_CMD_CAN_RX 2 /* device to host only */
+#define ESD_USB_CMD_CAN_TX 3 /* also used for TX_DONE */
+#define ESD_USB_CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */
+#define ESD_USB_CMD_TS 5 /* also used for TS_REPLY */
+#define ESD_USB_CMD_IDADD 6 /* also used for IDADD_REPLY */
+
+/* esd CAN message flags - dlc field */
+#define ESD_USB_RTR BIT(4)
+#define ESD_USB_NO_BRS BIT(4)
+#define ESD_USB_ESI BIT(5)
+#define ESD_USB_FD BIT(7)
+
+/* esd CAN message flags - id field */
+#define ESD_USB_EXTID BIT(29)
+#define ESD_USB_EVENT BIT(30)
+#define ESD_USB_IDMASK GENMASK(28, 0)
+
+/* esd CAN event ids */
+#define ESD_USB_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
+
+/* baudrate message flags */
+#define ESD_USB_LOM BIT(30) /* Listen Only Mode */
+#define ESD_USB_UBR BIT(31) /* User Bit Rate (controller BTR) in bits 0..27 */
+#define ESD_USB_NO_BAUDRATE GENMASK(30, 0) /* bit rate unconfigured */
+
+/* bit timing esd CAN-USB */
+#define ESD_USB_2_TSEG1_SHIFT 16
+#define ESD_USB_2_TSEG2_SHIFT 20
+#define ESD_USB_2_SJW_SHIFT 14
+#define ESD_USB_M_SJW_SHIFT 24
+#define ESD_USB_TRIPLE_SAMPLES BIT(23)
+
+/* Transmitter Delay Compensation */
+#define ESD_USB_3_TDC_MODE_AUTO 0
+
+/* esd IDADD message */
+#define ESD_USB_ID_ENABLE BIT(7)
+#define ESD_USB_MAX_ID_SEGMENT 64
+
+/* SJA1000 ECC register (emulated by usb firmware) */
+#define ESD_USB_SJA1000_ECC_SEG GENMASK(4, 0)
+#define ESD_USB_SJA1000_ECC_DIR BIT(5)
+#define ESD_USB_SJA1000_ECC_ERR BIT(2)
+#define ESD_USB_SJA1000_ECC_BIT 0x00
+#define ESD_USB_SJA1000_ECC_FORM BIT(6)
+#define ESD_USB_SJA1000_ECC_STUFF BIT(7)
+#define ESD_USB_SJA1000_ECC_MASK GENMASK(7, 6)
+
+/* esd bus state event codes */
+#define ESD_USB_BUSSTATE_MASK GENMASK(7, 6)
+#define ESD_USB_BUSSTATE_WARN BIT(6)
+#define ESD_USB_BUSSTATE_ERRPASSIVE BIT(7)
+#define ESD_USB_BUSSTATE_BUSOFF GENMASK(7, 6)
+
+#define ESD_USB_RX_BUFFER_SIZE 1024
+#define ESD_USB_MAX_RX_URBS 4
+#define ESD_USB_MAX_TX_URBS 16 /* must be power of 2 */
+
+/* Modes for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.mode */
+#define ESD_USB_3_BAUDRATE_MODE_DISABLE 0 /* remove from bus */
+#define ESD_USB_3_BAUDRATE_MODE_INDEX 1 /* ESD (CiA) bit rate idx */
+#define ESD_USB_3_BAUDRATE_MODE_BTR_CTRL 2 /* BTR values (controller)*/
+#define ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL 3 /* BTR values (canonical) */
+#define ESD_USB_3_BAUDRATE_MODE_NUM 4 /* numerical bit rate */
+#define ESD_USB_3_BAUDRATE_MODE_AUTOBAUD 5 /* autobaud */
+
+/* Flags for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.flags */
+#define ESD_USB_3_BAUDRATE_FLAG_FD BIT(0) /* enable CAN FD mode */
+#define ESD_USB_3_BAUDRATE_FLAG_LOM BIT(1) /* enable listen only mode */
+#define ESD_USB_3_BAUDRATE_FLAG_STM BIT(2) /* enable self test mode */
+#define ESD_USB_3_BAUDRATE_FLAG_TRS BIT(3) /* enable triple sampling */
+#define ESD_USB_3_BAUDRATE_FLAG_TXP BIT(4) /* enable transmit pause */
+
+struct esd_usb_header_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 rsvd[2];
+};
+
+struct esd_usb_version_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 rsvd;
+ u8 flags;
+ __le32 drv_version;
+};
+
+struct esd_usb_version_reply_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 nets;
+ u8 features;
+ __le32 version;
+ u8 name[16];
+ __le32 rsvd;
+ __le32 ts;
+};
+
+struct esd_usb_rx_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 dlc;
+ __le32 ts;
+ __le32 id; /* upper 3 bits contain flags */
+ union {
+ u8 data[CAN_MAX_DLEN];
+ u8 data_fd[CANFD_MAX_DLEN];
+ struct {
+ u8 status; /* CAN Controller Status */
+ u8 ecc; /* Error Capture Register */
+ u8 rec; /* RX Error Counter */
+ u8 tec; /* TX Error Counter */
+ } ev_can_err_ext; /* For ESD_USB_EV_CAN_ERROR_EXT */
+ };
+};
+
+struct esd_usb_tx_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 dlc;
+ u32 hnd; /* opaque handle, not used by device */
+ __le32 id; /* upper 3 bits contain flags */
+ union {
+ u8 data[CAN_MAX_DLEN];
+ u8 data_fd[CANFD_MAX_DLEN];
+ };
+};
+
+struct esd_usb_tx_done_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 status;
+ u32 hnd; /* opaque handle, not used by device */
+ __le32 ts;
+};
+
+struct esd_usb_id_filter_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 option;
+ __le32 mask[ESD_USB_MAX_ID_SEGMENT + 1]; /* +1 for 29bit extended IDs */
+};
+
+struct esd_usb_set_baudrate_msg {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 rsvd;
+ __le32 baud;
+};
+
+/* CAN-USB/3 baudrate configuration, used for nominal as well as for data bit rate */
+struct esd_usb_3_baudrate_cfg {
+ __le16 brp; /* bit rate pre-scaler */
+ __le16 tseg1; /* time segment before sample point */
+ __le16 tseg2; /* time segment after sample point */
+ __le16 sjw; /* synchronization jump width */
+};
+
+/* In principle, the esd CAN-USB/3 supports Transmitter Delay Compensation (TDC),
+ * but currently only the automatic TDC mode is supported by this driver.
+ * An implementation for manual TDC configuration will follow.
+ *
+ * For information about struct esd_usb_3_tdc_cfg, see
+ * NTCAN Application Developers Manual, 6.2.25 NTCAN_TDC_CFG + related chapters
+ * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf
+ */
+struct esd_usb_3_tdc_cfg {
+ u8 tdc_mode; /* transmitter delay compensation mode */
+ u8 ssp_offset; /* secondary sample point offset in mtq */
+ s8 ssp_shift; /* secondary sample point shift in mtq */
+ u8 tdc_filter; /* TDC filter in mtq */
+};
+
+/* Extended version of the above set_baudrate_msg for a CAN-USB/3
+ * to define the CAN bit timing configuration of the CAN controller in
+ * CAN FD mode as well as in Classical CAN mode.
+ *
+ * The payload of this command is a NTCAN_BAUDRATE_X structure according to
+ * esd electronics gmbh, NTCAN Application Developers Manual, 6.2.15 NTCAN_BAUDRATE_X
+ * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf
+ */
+struct esd_usb_3_set_baudrate_msg_x {
+ u8 len; /* total message length in 32bit words */
+ u8 cmd;
+ u8 net;
+ u8 rsvd; /* reserved */
+ /* Payload ... */
+ __le16 mode; /* mode word, see ESD_USB_3_BAUDRATE_MODE_xxx */
+ __le16 flags; /* control flags, see ESD_USB_3_BAUDRATE_FLAG_xxx */
+ struct esd_usb_3_tdc_cfg tdc; /* TDC configuration */
+ struct esd_usb_3_baudrate_cfg nom; /* nominal bit rate */
+ struct esd_usb_3_baudrate_cfg data; /* data bit rate */
+};
+
+/* Main message type used between library and application */
+union __packed esd_usb_msg {
+ struct esd_usb_header_msg hdr;
+ struct esd_usb_version_msg version;
+ struct esd_usb_version_reply_msg version_reply;
+ struct esd_usb_rx_msg rx;
+ struct esd_usb_tx_msg tx;
+ struct esd_usb_tx_done_msg txdone;
+ struct esd_usb_set_baudrate_msg setbaud;
+ struct esd_usb_3_set_baudrate_msg_x setbaud_x;
+ struct esd_usb_id_filter_msg filter;
+};
+
+static struct usb_device_id esd_usb_table[] = {
+ {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB2_PRODUCT_ID)},
+ {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSBM_PRODUCT_ID)},
+ {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB3_PRODUCT_ID)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, esd_usb_table);
+
+struct esd_usb_net_priv;
+
+struct esd_tx_urb_context {
+ struct esd_usb_net_priv *priv;
+ u32 echo_index;
+};
+
+struct esd_usb {
+ struct usb_device *udev;
+ struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS];
+
+ struct usb_anchor rx_submitted;
+
+ int net_count;
+ u32 version;
+ int rxinitdone;
+ int in_usb_disconnect;
+ void *rxbuf[ESD_USB_MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
+};
+
+struct esd_usb_net_priv {
+ struct can_priv can; /* must be the first member */
+
+ atomic_t active_tx_jobs;
+ struct usb_anchor tx_submitted;
+ struct esd_tx_urb_context tx_contexts[ESD_USB_MAX_TX_URBS];
+
+ struct esd_usb *usb;
+ struct net_device *netdev;
+ int index;
+ u8 old_state;
+ struct can_berr_counter bec;
+};
+
+static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
+{
+ struct net_device_stats *stats = &priv->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 id = le32_to_cpu(msg->rx.id) & ESD_USB_IDMASK;
+
+ if (id == ESD_USB_EV_CAN_ERROR_EXT) {
+ u8 state = msg->rx.ev_can_err_ext.status;
+ u8 ecc = msg->rx.ev_can_err_ext.ecc;
+
+ priv->bec.rxerr = msg->rx.ev_can_err_ext.rec;
+ priv->bec.txerr = msg->rx.ev_can_err_ext.tec;
+
+ netdev_dbg(priv->netdev,
+ "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+ msg->rx.dlc, state, ecc,
+ priv->bec.rxerr, priv->bec.txerr);
+
+ /* if berr-reporting is off, only pass through on state change ... */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ state == priv->old_state)
+ return;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb)
+ stats->rx_dropped++;
+
+ if (state != priv->old_state) {
+ enum can_state tx_state, rx_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+
+ priv->old_state = state;
+
+ switch (state & ESD_USB_BUSSTATE_MASK) {
+ case ESD_USB_BUSSTATE_BUSOFF:
+ new_state = CAN_STATE_BUS_OFF;
+ can_bus_off(priv->netdev);
+ break;
+ case ESD_USB_BUSSTATE_WARN:
+ new_state = CAN_STATE_ERROR_WARNING;
+ break;
+ case ESD_USB_BUSSTATE_ERRPASSIVE:
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ default:
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ priv->bec.txerr = 0;
+ priv->bec.rxerr = 0;
+ break;
+ }
+
+ if (new_state != priv->can.state) {
+ tx_state = (priv->bec.txerr >= priv->bec.rxerr) ? new_state : 0;
+ rx_state = (priv->bec.txerr <= priv->bec.rxerr) ? new_state : 0;
+ can_change_state(priv->netdev, cf,
+ tx_state, rx_state);
+ }
+ } else if (skb) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & ESD_USB_SJA1000_ECC_MASK) {
+ case ESD_USB_SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ESD_USB_SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ESD_USB_SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
+
+ /* Error occurred during transmission? */
+ if (!(ecc & ESD_USB_SJA1000_ECC_DIR))
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ /* Bit stream position in CAN frame as the error was detected */
+ cf->data[3] = ecc & ESD_USB_SJA1000_ECC_SEG;
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
+
+ netif_rx(skb);
+ }
+ }
+}
+
+static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
+{
+ struct net_device_stats *stats = &priv->netdev->stats;
+ struct can_frame *cf;
+ struct canfd_frame *cfd;
+ struct sk_buff *skb;
+ u32 id;
+ u8 len;
+
+ if (!netif_device_present(priv->netdev))
+ return;
+
+ id = le32_to_cpu(msg->rx.id);
+
+ if (id & ESD_USB_EVENT) {
+ esd_usb_rx_event(priv, msg);
+ } else {
+ if (msg->rx.dlc & ESD_USB_FD) {
+ skb = alloc_canfd_skb(priv->netdev, &cfd);
+ } else {
+ skb = alloc_can_skb(priv->netdev, &cf);
+ cfd = (struct canfd_frame *)cf;
+ }
+
+ if (skb == NULL) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cfd->can_id = id & ESD_USB_IDMASK;
+
+ if (msg->rx.dlc & ESD_USB_FD) {
+ /* masking by 0x0F is already done within can_fd_dlc2len() */
+ cfd->len = can_fd_dlc2len(msg->rx.dlc);
+ len = cfd->len;
+ if ((msg->rx.dlc & ESD_USB_NO_BRS) == 0)
+ cfd->flags |= CANFD_BRS;
+ if (msg->rx.dlc & ESD_USB_ESI)
+ cfd->flags |= CANFD_ESI;
+ } else {
+ can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR, priv->can.ctrlmode);
+ len = cf->len;
+ if (msg->rx.dlc & ESD_USB_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ len = 0;
+ }
+ }
+
+ if (id & ESD_USB_EXTID)
+ cfd->can_id |= CAN_EFF_FLAG;
+
+ memcpy(cfd->data, msg->rx.data_fd, len);
+ stats->rx_bytes += len;
+ stats->rx_packets++;
+
+ netif_rx(skb);
+ }
+}
+
+static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
+{
+ struct net_device_stats *stats = &priv->netdev->stats;
+ struct net_device *netdev = priv->netdev;
+ struct esd_tx_urb_context *context;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ context = &priv->tx_contexts[msg->txdone.hnd & (ESD_USB_MAX_TX_URBS - 1)];
+
+ if (!msg->txdone.status) {
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
+ } else {
+ stats->tx_errors++;
+ can_free_echo_skb(netdev, context->echo_index, NULL);
+ }
+
+ /* Release context */
+ context->echo_index = ESD_USB_MAX_TX_URBS;
+ atomic_dec(&priv->active_tx_jobs);
+
+ netif_wake_queue(netdev);
+}
+
+static void esd_usb_read_bulk_callback(struct urb *urb)
+{
+ struct esd_usb *dev = urb->context;
+ int err;
+ int pos = 0;
+ int i;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ dev_info(dev->udev->dev.parent,
+ "Rx URB aborted (%pe)\n", ERR_PTR(urb->status));
+ goto resubmit_urb;
+ }
+
+ while (pos < urb->actual_length) {
+ union esd_usb_msg *msg;
+
+ msg = (union esd_usb_msg *)(urb->transfer_buffer + pos);
+
+ switch (msg->hdr.cmd) {
+ case ESD_USB_CMD_CAN_RX:
+ if (msg->rx.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
+ esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg);
+ break;
+
+ case ESD_USB_CMD_CAN_TX:
+ if (msg->txdone.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
+ esd_usb_tx_done_msg(dev->nets[msg->txdone.net],
+ msg);
+ break;
+ }
+
+ pos += msg->hdr.len * sizeof(u32); /* convert to # of bytes */
+
+ if (pos > urb->actual_length) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
+ urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
+ esd_usb_read_bulk_callback, dev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
+ for (i = 0; i < dev->net_count; i++) {
+ if (dev->nets[i])
+ netif_device_detach(dev->nets[i]->netdev);
+ }
+ } else if (err) {
+ dev_err(dev->udev->dev.parent,
+ "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
+ }
+}
+
+/* callback for bulk OUT urb (TX data to the device) */
+static void esd_usb_write_bulk_callback(struct urb *urb)
+{
+ struct esd_tx_urb_context *context = urb->context;
+ struct esd_usb_net_priv *priv;
+ struct net_device *netdev;
+ size_t size = sizeof(union esd_usb_msg);
+
+ WARN_ON(!context);
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, size,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status));
+
+ netif_trans_update(netdev);
+}
+
+static ssize_t firmware_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(d);
+ struct esd_usb *dev = usb_get_intfdata(intf);
+
+ return sprintf(buf, "%d.%d.%d\n",
+ (dev->version >> 12) & 0xf,
+ (dev->version >> 8) & 0xf,
+ dev->version & 0xff);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t hardware_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(d);
+ struct esd_usb *dev = usb_get_intfdata(intf);
+
+ return sprintf(buf, "%d.%d.%d\n",
+ (dev->version >> 28) & 0xf,
+ (dev->version >> 24) & 0xf,
+ (dev->version >> 16) & 0xff);
+}
+static DEVICE_ATTR_RO(hardware);
+
+static ssize_t nets_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(d);
+ struct esd_usb *dev = usb_get_intfdata(intf);
+
+ return sprintf(buf, "%d", dev->net_count);
+}
+static DEVICE_ATTR_RO(nets);
+
+static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg)
+{
+ int actual_length;
+
+ return usb_bulk_msg(dev->udev,
+ usb_sndbulkpipe(dev->udev, 2),
+ msg,
+ msg->hdr.len * sizeof(u32), /* convert to # of bytes */
+ &actual_length,
+ 1000);
+}
+
+static int esd_usb_wait_msg(struct esd_usb *dev,
+ union esd_usb_msg *msg)
+{
+ int actual_length;
+
+ return usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, 1),
+ msg,
+ sizeof(*msg),
+ &actual_length,
+ 1000);
+}
+
+static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
+{
+ int i, err = 0;
+
+ if (dev->rxinitdone)
+ return 0;
+
+ for (i = 0; i < ESD_USB_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+ dma_addr_t buf_dma;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, GFP_KERNEL,
+ &buf_dma);
+ if (!buf) {
+ dev_warn(dev->udev->dev.parent,
+ "No memory left for USB buffer\n");
+ err = -ENOMEM;
+ goto freeurb;
+ }
+
+ urb->transfer_dma = buf_dma;
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, 1),
+ buf, ESD_USB_RX_BUFFER_SIZE,
+ esd_usb_read_bulk_callback, dev);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
+ goto freeurb;
+ }
+
+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
+freeurb:
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ if (err)
+ break;
+ }
+
+ /* Did we submit any URBs? */
+ if (i == 0) {
+ dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n");
+ return err;
+ }
+
+ /* Warn if we couldn't submit all the URBs */
+ if (i < ESD_USB_MAX_RX_URBS) {
+ dev_warn(dev->udev->dev.parent,
+ "rx performance may be slow\n");
+ }
+
+ dev->rxinitdone = 1;
+ return 0;
+}
+
+/* Start interface */
+static int esd_usb_start(struct esd_usb_net_priv *priv)
+{
+ struct esd_usb *dev = priv->usb;
+ struct net_device *netdev = priv->netdev;
+ union esd_usb_msg *msg;
+ int err, i;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable all IDs
+ * The IDADD message takes up to 64 32-bit bitmasks (2048 bits).
+ * Each bit represents one 11 bit CAN identifier. A set bit
+ * enables reception of the corresponding CAN identifier. A cleared
+ * bit disables this identifier. An additional bitmask value
+ * following the CAN 2.0A bits is used to enable reception of
+ * extended CAN frames. Only the LSB of this final mask is checked
+ * for the complete 29 bit ID range. The IDADD message also allows
+ * filter configuration for an ID subset. In this case you can add
+ * the number of the starting bitmask (0..64) to the filter.option
+ * field followed by only some bitmasks.
+ */
+ msg->hdr.cmd = ESD_USB_CMD_IDADD;
+ msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32); /* # of 32bit words */
+ msg->filter.net = priv->index;
+ msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
+ for (i = 0; i < ESD_USB_MAX_ID_SEGMENT; i++)
+ msg->filter.mask[i] = cpu_to_le32(GENMASK(31, 0));
+ /* enable 29bit extended IDs */
+ msg->filter.mask[ESD_USB_MAX_ID_SEGMENT] = cpu_to_le32(BIT(0));
+
+ err = esd_usb_send_msg(dev, msg);
+ if (err)
+ goto out;
+
+ err = esd_usb_setup_rx_urbs(dev);
+ if (err)
+ goto out;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+out:
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ if (err)
+ netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err));
+
+ kfree(msg);
+ return err;
+}
+
+static void unlink_all_urbs(struct esd_usb *dev)
+{
+ struct esd_usb_net_priv *priv;
+ int i, j;
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < ESD_USB_MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
+ for (i = 0; i < dev->net_count; i++) {
+ priv = dev->nets[i];
+ if (priv) {
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_jobs, 0);
+
+ for (j = 0; j < ESD_USB_MAX_TX_URBS; j++)
+ priv->tx_contexts[j].echo_index = ESD_USB_MAX_TX_URBS;
+ }
+ }
+}
+
+static int esd_usb_open(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ /* finally start device */
+ err = esd_usb_start(priv);
+ if (err) {
+ close_candev(netdev);
+ return err;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb *dev = priv->usb;
+ struct esd_tx_urb_context *context = NULL;
+ struct net_device_stats *stats = &netdev->stats;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ union esd_usb_msg *msg;
+ struct urb *urb;
+ u8 *buf;
+ int i, err;
+ int ret = NETDEV_TX_OK;
+ size_t size = sizeof(union esd_usb_msg);
+
+ if (can_dev_dropped_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ goto nourbmem;
+ }
+
+ buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ goto nobufmem;
+ }
+
+ msg = (union esd_usb_msg *)buf;
+
+ /* minimal length as # of 32bit words */
+ msg->hdr.len = offsetof(struct esd_usb_tx_msg, data) / sizeof(u32);
+ msg->hdr.cmd = ESD_USB_CMD_CAN_TX;
+ msg->tx.net = priv->index;
+
+ if (can_is_canfd_skb(skb)) {
+ msg->tx.dlc = can_fd_len2dlc(cfd->len);
+ msg->tx.dlc |= ESD_USB_FD;
+
+ if ((cfd->flags & CANFD_BRS) == 0)
+ msg->tx.dlc |= ESD_USB_NO_BRS;
+ } else {
+ msg->tx.dlc = can_get_cc_dlc((struct can_frame *)cfd, priv->can.ctrlmode);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ msg->tx.dlc |= ESD_USB_RTR;
+ }
+
+ msg->tx.id = cpu_to_le32(cfd->can_id & CAN_ERR_MASK);
+
+ if (cfd->can_id & CAN_EFF_FLAG)
+ msg->tx.id |= cpu_to_le32(ESD_USB_EXTID);
+
+ memcpy(msg->tx.data_fd, cfd->data, cfd->len);
+
+ /* round up, then divide by 4 to add the payload length as # of 32bit words */
+ msg->hdr.len += DIV_ROUND_UP(cfd->len, sizeof(u32));
+
+ for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) {
+ if (priv->tx_contexts[i].echo_index == ESD_USB_MAX_TX_URBS) {
+ context = &priv->tx_contexts[i];
+ break;
+ }
+ }
+
+ /* This should never happen */
+ if (!context) {
+ netdev_warn(netdev, "couldn't find free context\n");
+ ret = NETDEV_TX_BUSY;
+ goto releasebuf;
+ }
+
+ context->priv = priv;
+ context->echo_index = i;
+
+ /* hnd must not be 0 - MSB is stripped in txdone handling */
+ msg->tx.hnd = BIT(31) | i; /* returned in TX done message */
+
+ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
+ msg->hdr.len * sizeof(u32), /* convert to # of bytes */
+ esd_usb_write_bulk_callback, context);
+
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
+
+ atomic_inc(&priv->active_tx_jobs);
+
+ /* Slow down tx path */
+ if (atomic_read(&priv->active_tx_jobs) >= ESD_USB_MAX_TX_URBS)
+ netif_stop_queue(netdev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ can_free_echo_skb(netdev, context->echo_index, NULL);
+
+ atomic_dec(&priv->active_tx_jobs);
+ usb_unanchor_urb(urb);
+
+ stats->tx_dropped++;
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err));
+
+ goto releasebuf;
+ }
+
+ netif_trans_update(netdev);
+
+ /* Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+releasebuf:
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+
+nobufmem:
+ usb_free_urb(urb);
+
+nourbmem:
+ return ret;
+}
+
+/* Stop interface */
+static int esd_usb_stop(struct esd_usb_net_priv *priv)
+{
+ union esd_usb_msg *msg;
+ int err;
+ int i;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Disable all IDs (see esd_usb_start()) */
+ msg->hdr.cmd = ESD_USB_CMD_IDADD;
+ msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32); /* # of 32bit words */
+ msg->filter.net = priv->index;
+ msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
+ for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
+ msg->filter.mask[i] = 0;
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0) {
+ netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err));
+ goto bail;
+ }
+
+ /* set CAN controller to reset mode */
+ msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
+ msg->hdr.cmd = ESD_USB_CMD_SETBAUD;
+ msg->setbaud.net = priv->index;
+ msg->setbaud.rsvd = 0;
+ msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0)
+ netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err));
+
+bail:
+ kfree(msg);
+
+ return err;
+}
+
+static int esd_usb_close(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!priv->usb->in_usb_disconnect) {
+ /* It's moot to try this in usb_disconnect()! */
+ err = esd_usb_stop(priv);
+ }
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ netif_stop_queue(netdev);
+
+ close_candev(netdev);
+
+ return err;
+}
+
+static const struct net_device_ops esd_usb_netdev_ops = {
+ .ndo_open = esd_usb_open,
+ .ndo_stop = esd_usb_close,
+ .ndo_start_xmit = esd_usb_start_xmit,
+};
+
+static const struct ethtool_ops esd_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct can_bittiming_const esd_usb_2_bittiming_const = {
+ .name = "esd_usb_2",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static int esd_usb_2_set_bittiming(struct net_device *netdev)
+{
+ const struct can_bittiming_const *btc = &esd_usb_2_bittiming_const;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ union esd_usb_msg *msg;
+ int err;
+ u32 canbtr;
+ int sjw_shift;
+
+ canbtr = ESD_USB_UBR;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ canbtr |= ESD_USB_LOM;
+
+ canbtr |= (bt->brp - 1) & (btc->brp_max - 1);
+
+ if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
+ ESD_USB_CANUSBM_PRODUCT_ID)
+ sjw_shift = ESD_USB_M_SJW_SHIFT;
+ else
+ sjw_shift = ESD_USB_2_SJW_SHIFT;
+
+ canbtr |= ((bt->sjw - 1) & (btc->sjw_max - 1))
+ << sjw_shift;
+ canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
+ & (btc->tseg1_max - 1))
+ << ESD_USB_2_TSEG1_SHIFT;
+ canbtr |= ((bt->phase_seg2 - 1) & (btc->tseg2_max - 1))
+ << ESD_USB_2_TSEG2_SHIFT;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ canbtr |= ESD_USB_TRIPLE_SAMPLES;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
+ msg->hdr.cmd = ESD_USB_CMD_SETBAUD;
+ msg->setbaud.net = priv->index;
+ msg->setbaud.rsvd = 0;
+ msg->setbaud.baud = cpu_to_le32(canbtr);
+
+ netdev_dbg(netdev, "setting BTR=%#x\n", canbtr);
+
+ err = esd_usb_send_msg(priv->usb, msg);
+
+ kfree(msg);
+ return err;
+}
+
+/* Nominal bittiming constants, see
+ * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022
+ * 48.6.8 MCAN Nominal Bit Timing and Prescaler Register
+ */
+static const struct can_bittiming_const esd_usb_3_nom_bittiming_const = {
+ .name = "esd_usb_3",
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Data bittiming constants, see
+ * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022
+ * 48.6.4 MCAN Data Bit Timing and Prescaler Register
+ */
+static const struct can_bittiming_const esd_usb_3_data_bittiming_const = {
+ .name = "esd_usb_3",
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static int esd_usb_3_set_bittiming(struct net_device *netdev)
+{
+ const struct can_bittiming_const *nom_btc = &esd_usb_3_nom_bittiming_const;
+ const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct can_bittiming *nom_bt = &priv->can.bittiming;
+ struct can_bittiming *data_bt = &priv->can.fd.data_bittiming;
+ struct esd_usb_3_set_baudrate_msg_x *baud_x;
+ union esd_usb_msg *msg;
+ u16 flags = 0;
+ int err;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ baud_x = &msg->setbaud_x;
+
+ /* Canonical is the most reasonable mode for SocketCAN on CAN-USB/3 ... */
+ baud_x->mode = cpu_to_le16(ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
+
+ baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
+ baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
+ baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
+ & (nom_btc->tseg1_max - 1));
+ baud_x->nom.tseg2 = cpu_to_le16(nom_bt->phase_seg2 & (nom_btc->tseg2_max - 1));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ baud_x->data.brp = cpu_to_le16(data_bt->brp & (data_btc->brp_max - 1));
+ baud_x->data.sjw = cpu_to_le16(data_bt->sjw & (data_btc->sjw_max - 1));
+ baud_x->data.tseg1 = cpu_to_le16((data_bt->prop_seg + data_bt->phase_seg1)
+ & (data_btc->tseg1_max - 1));
+ baud_x->data.tseg2 = cpu_to_le16(data_bt->phase_seg2 & (data_btc->tseg2_max - 1));
+ flags |= ESD_USB_3_BAUDRATE_FLAG_FD;
+ }
+
+ /* Currently this driver only supports the automatic TDC mode */
+ baud_x->tdc.tdc_mode = ESD_USB_3_TDC_MODE_AUTO;
+ baud_x->tdc.ssp_offset = 0;
+ baud_x->tdc.ssp_shift = 0;
+ baud_x->tdc.tdc_filter = 0;
+
+ baud_x->flags = cpu_to_le16(flags);
+ baud_x->net = priv->index;
+ baud_x->rsvd = 0;
+
+ /* set len as # of 32bit words */
+ msg->hdr.len = sizeof(struct esd_usb_3_set_baudrate_msg_x) / sizeof(u32);
+ msg->hdr.cmd = ESD_USB_CMD_SETBAUD;
+
+ netdev_dbg(netdev,
+ "ctrlmode=%#x/%#x, esd-net=%u, esd-mode=%#x, esd-flags=%#x\n",
+ priv->can.ctrlmode, priv->can.ctrlmode_supported,
+ priv->index, le16_to_cpu(baud_x->mode), flags);
+
+ err = esd_usb_send_msg(priv->usb, msg);
+
+ kfree(msg);
+ return err;
+}
+
+static int esd_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+
+ bec->txerr = priv->bec.txerr;
+ bec->rxerr = priv->bec.rxerr;
+
+ return 0;
+}
+
+static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ netif_wake_queue(netdev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
+{
+ struct esd_usb *dev = usb_get_intfdata(intf);
+ struct net_device *netdev;
+ struct esd_usb_net_priv *priv;
+ int err = 0;
+ int i;
+
+ netdev = alloc_candev(sizeof(*priv), ESD_USB_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "couldn't alloc candev\n");
+ err = -ENOMEM;
+ goto done;
+ }
+
+ priv = netdev_priv(netdev);
+
+ init_usb_anchor(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_jobs, 0);
+
+ for (i = 0; i < ESD_USB_MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = ESD_USB_MAX_TX_URBS;
+
+ priv->usb = dev;
+ priv->netdev = netdev;
+ priv->index = index;
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
+ case ESD_USB_CANUSB3_PRODUCT_ID:
+ priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
+ priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+ priv->can.do_set_bittiming = esd_usb_3_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming;
+ break;
+
+ case ESD_USB_CANUSBM_PRODUCT_ID:
+ priv->can.clock.freq = ESD_USB_M_CAN_CLOCK;
+ priv->can.bittiming_const = &esd_usb_2_bittiming_const;
+ priv->can.do_set_bittiming = esd_usb_2_set_bittiming;
+ break;
+
+ case ESD_USB_CANUSB2_PRODUCT_ID:
+ default:
+ priv->can.clock.freq = ESD_USB_2_CAN_CLOCK;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->can.bittiming_const = &esd_usb_2_bittiming_const;
+ priv->can.do_set_bittiming = esd_usb_2_set_bittiming;
+ break;
+ }
+
+ priv->can.do_set_mode = esd_usb_set_mode;
+ priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ netdev->netdev_ops = &esd_usb_netdev_ops;
+ netdev->ethtool_ops = &esd_usb_ethtool_ops;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ netdev->dev_id = index;
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err));
+ free_candev(netdev);
+ err = -ENOMEM;
+ goto done;
+ }
+
+ dev->nets[index] = priv;
+ netdev_info(netdev, "registered\n");
+
+done:
+ return err;
+}
+
+/* probe function for new USB devices
+ *
+ * check version information and number of available
+ * CAN interfaces
+ */
+static int esd_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct esd_usb *dev;
+ union esd_usb_msg *msg;
+ int i, err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ dev->udev = interface_to_usbdev(intf);
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto free_msg;
+ }
+
+ /* query number of CAN interfaces (nets) */
+ msg->hdr.cmd = ESD_USB_CMD_VERSION;
+ msg->hdr.len = sizeof(struct esd_usb_version_msg) / sizeof(u32); /* # of 32bit words */
+ msg->version.rsvd = 0;
+ msg->version.flags = 0;
+ msg->version.drv_version = 0;
+
+ err = esd_usb_send_msg(dev, msg);
+ if (err < 0) {
+ dev_err(&intf->dev, "sending version message failed\n");
+ goto free_msg;
+ }
+
+ err = esd_usb_wait_msg(dev, msg);
+ if (err < 0) {
+ dev_err(&intf->dev, "no version message answer\n");
+ goto free_msg;
+ }
+
+ dev->net_count = (int)msg->version_reply.nets;
+ dev->version = le32_to_cpu(msg->version_reply.version);
+
+ if (device_create_file(&intf->dev, &dev_attr_firmware))
+ dev_err(&intf->dev,
+ "Couldn't create device file for firmware\n");
+
+ if (device_create_file(&intf->dev, &dev_attr_hardware))
+ dev_err(&intf->dev,
+ "Couldn't create device file for hardware\n");
+
+ if (device_create_file(&intf->dev, &dev_attr_nets))
+ dev_err(&intf->dev,
+ "Couldn't create device file for nets\n");
+
+ /* do per device probing */
+ for (i = 0; i < dev->net_count; i++)
+ esd_usb_probe_one_net(intf, i);
+
+free_msg:
+ kfree(msg);
+ if (err)
+ kfree(dev);
+done:
+ return err;
+}
+
+/* called by the usb core when the device is removed from the system */
+static void esd_usb_disconnect(struct usb_interface *intf)
+{
+ struct esd_usb *dev = usb_get_intfdata(intf);
+ struct net_device *netdev;
+ int i;
+
+ device_remove_file(&intf->dev, &dev_attr_firmware);
+ device_remove_file(&intf->dev, &dev_attr_hardware);
+ device_remove_file(&intf->dev, &dev_attr_nets);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (dev) {
+ dev->in_usb_disconnect = 1;
+ for (i = 0; i < dev->net_count; i++) {
+ if (dev->nets[i]) {
+ netdev = dev->nets[i]->netdev;
+ netdev_info(netdev, "unregister\n");
+ unregister_netdev(netdev);
+ free_candev(netdev);
+ }
+ }
+ unlink_all_urbs(dev);
+ kfree(dev);
+ }
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver esd_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = esd_usb_probe,
+ .disconnect = esd_usb_disconnect,
+ .id_table = esd_usb_table,
+};
+
+module_usb_driver(esd_usb_driver);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
deleted file mode 100644
index c6068a251fbe..000000000000
--- a/drivers/net/can/usb/esd_usb2.c
+++ /dev/null
@@ -1,1157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
- *
- * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
- */
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/usb.h>
-
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
-MODULE_LICENSE("GPL v2");
-
-/* Define these values to match your devices */
-#define USB_ESDGMBH_VENDOR_ID 0x0ab4
-#define USB_CANUSB2_PRODUCT_ID 0x0010
-#define USB_CANUSBM_PRODUCT_ID 0x0011
-
-#define ESD_USB2_CAN_CLOCK 60000000
-#define ESD_USBM_CAN_CLOCK 36000000
-#define ESD_USB2_MAX_NETS 2
-
-/* USB2 commands */
-#define CMD_VERSION 1 /* also used for VERSION_REPLY */
-#define CMD_CAN_RX 2 /* device to host only */
-#define CMD_CAN_TX 3 /* also used for TX_DONE */
-#define CMD_SETBAUD 4 /* also used for SETBAUD_REPLY */
-#define CMD_TS 5 /* also used for TS_REPLY */
-#define CMD_IDADD 6 /* also used for IDADD_REPLY */
-
-/* esd CAN message flags - dlc field */
-#define ESD_RTR 0x10
-
-/* esd CAN message flags - id field */
-#define ESD_EXTID 0x20000000
-#define ESD_EVENT 0x40000000
-#define ESD_IDMASK 0x1fffffff
-
-/* esd CAN event ids used by this driver */
-#define ESD_EV_CAN_ERROR_EXT 2
-
-/* baudrate message flags */
-#define ESD_USB2_UBR 0x80000000
-#define ESD_USB2_LOM 0x40000000
-#define ESD_USB2_NO_BAUDRATE 0x7fffffff
-#define ESD_USB2_TSEG1_MIN 1
-#define ESD_USB2_TSEG1_MAX 16
-#define ESD_USB2_TSEG1_SHIFT 16
-#define ESD_USB2_TSEG2_MIN 1
-#define ESD_USB2_TSEG2_MAX 8
-#define ESD_USB2_TSEG2_SHIFT 20
-#define ESD_USB2_SJW_MAX 4
-#define ESD_USB2_SJW_SHIFT 14
-#define ESD_USBM_SJW_SHIFT 24
-#define ESD_USB2_BRP_MIN 1
-#define ESD_USB2_BRP_MAX 1024
-#define ESD_USB2_BRP_INC 1
-#define ESD_USB2_3_SAMPLES 0x00800000
-
-/* esd IDADD message */
-#define ESD_ID_ENABLE 0x80
-#define ESD_MAX_ID_SEGMENT 64
-
-/* SJA1000 ECC register (emulated by usb2 firmware) */
-#define SJA1000_ECC_SEG 0x1F
-#define SJA1000_ECC_DIR 0x20
-#define SJA1000_ECC_ERR 0x06
-#define SJA1000_ECC_BIT 0x00
-#define SJA1000_ECC_FORM 0x40
-#define SJA1000_ECC_STUFF 0x80
-#define SJA1000_ECC_MASK 0xc0
-
-/* esd bus state event codes */
-#define ESD_BUSSTATE_MASK 0xc0
-#define ESD_BUSSTATE_WARN 0x40
-#define ESD_BUSSTATE_ERRPASSIVE 0x80
-#define ESD_BUSSTATE_BUSOFF 0xc0
-
-#define RX_BUFFER_SIZE 1024
-#define MAX_RX_URBS 4
-#define MAX_TX_URBS 16 /* must be power of 2 */
-
-struct header_msg {
- u8 len; /* len is always the total message length in 32bit words */
- u8 cmd;
- u8 rsvd[2];
-};
-
-struct version_msg {
- u8 len;
- u8 cmd;
- u8 rsvd;
- u8 flags;
- __le32 drv_version;
-};
-
-struct version_reply_msg {
- u8 len;
- u8 cmd;
- u8 nets;
- u8 features;
- __le32 version;
- u8 name[16];
- __le32 rsvd;
- __le32 ts;
-};
-
-struct rx_msg {
- u8 len;
- u8 cmd;
- u8 net;
- u8 dlc;
- __le32 ts;
- __le32 id; /* upper 3 bits contain flags */
- u8 data[8];
-};
-
-struct tx_msg {
- u8 len;
- u8 cmd;
- u8 net;
- u8 dlc;
- u32 hnd; /* opaque handle, not used by device */
- __le32 id; /* upper 3 bits contain flags */
- u8 data[8];
-};
-
-struct tx_done_msg {
- u8 len;
- u8 cmd;
- u8 net;
- u8 status;
- u32 hnd; /* opaque handle, not used by device */
- __le32 ts;
-};
-
-struct id_filter_msg {
- u8 len;
- u8 cmd;
- u8 net;
- u8 option;
- __le32 mask[ESD_MAX_ID_SEGMENT + 1];
-};
-
-struct set_baudrate_msg {
- u8 len;
- u8 cmd;
- u8 net;
- u8 rsvd;
- __le32 baud;
-};
-
-/* Main message type used between library and application */
-struct __attribute__ ((packed)) esd_usb2_msg {
- union {
- struct header_msg hdr;
- struct version_msg version;
- struct version_reply_msg version_reply;
- struct rx_msg rx;
- struct tx_msg tx;
- struct tx_done_msg txdone;
- struct set_baudrate_msg setbaud;
- struct id_filter_msg filter;
- } msg;
-};
-
-static struct usb_device_id esd_usb2_table[] = {
- {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
- {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
- {}
-};
-MODULE_DEVICE_TABLE(usb, esd_usb2_table);
-
-struct esd_usb2_net_priv;
-
-struct esd_tx_urb_context {
- struct esd_usb2_net_priv *priv;
- u32 echo_index;
- int len; /* CAN payload length */
-};
-
-struct esd_usb2 {
- struct usb_device *udev;
- struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
-
- struct usb_anchor rx_submitted;
-
- int net_count;
- u32 version;
- int rxinitdone;
- void *rxbuf[MAX_RX_URBS];
- dma_addr_t rxbuf_dma[MAX_RX_URBS];
-};
-
-struct esd_usb2_net_priv {
- struct can_priv can; /* must be the first member */
-
- atomic_t active_tx_jobs;
- struct usb_anchor tx_submitted;
- struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
-
- struct esd_usb2 *usb2;
- struct net_device *netdev;
- int index;
- u8 old_state;
- struct can_berr_counter bec;
-};
-
-static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
-{
- struct net_device_stats *stats = &priv->netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
-
- if (id == ESD_EV_CAN_ERROR_EXT) {
- u8 state = msg->msg.rx.data[0];
- u8 ecc = msg->msg.rx.data[1];
- u8 rxerr = msg->msg.rx.data[2];
- u8 txerr = msg->msg.rx.data[3];
-
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb == NULL) {
- stats->rx_dropped++;
- return;
- }
-
- if (state != priv->old_state) {
- priv->old_state = state;
-
- switch (state & ESD_BUSSTATE_MASK) {
- case ESD_BUSSTATE_BUSOFF:
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
- can_bus_off(priv->netdev);
- break;
- case ESD_BUSSTATE_WARN:
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- break;
- case ESD_BUSSTATE_ERRPASSIVE:
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- break;
- default:
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- break;
- }
- } else {
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
-
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-
- switch (ecc & SJA1000_ECC_MASK) {
- case SJA1000_ECC_BIT:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case SJA1000_ECC_FORM:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case SJA1000_ECC_STUFF:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
- break;
- }
-
- /* Error occurred during transmission? */
- if (!(ecc & SJA1000_ECC_DIR))
- cf->data[2] |= CAN_ERR_PROT_TX;
-
- if (priv->can.state == CAN_STATE_ERROR_WARNING ||
- priv->can.state == CAN_STATE_ERROR_PASSIVE) {
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
- priv->bec.txerr = txerr;
- priv->bec.rxerr = rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
- }
-}
-
-static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
-{
- struct net_device_stats *stats = &priv->netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- int i;
- u32 id;
-
- if (!netif_device_present(priv->netdev))
- return;
-
- id = le32_to_cpu(msg->msg.rx.id);
-
- if (id & ESD_EVENT) {
- esd_usb2_rx_event(priv, msg);
- } else {
- skb = alloc_can_skb(priv->netdev, &cf);
- if (skb == NULL) {
- stats->rx_dropped++;
- return;
- }
-
- cf->can_id = id & ESD_IDMASK;
- can_frame_set_cc_len(cf, msg->msg.rx.dlc & ~ESD_RTR,
- priv->can.ctrlmode);
-
- if (id & ESD_EXTID)
- cf->can_id |= CAN_EFF_FLAG;
-
- if (msg->msg.rx.dlc & ESD_RTR) {
- cf->can_id |= CAN_RTR_FLAG;
- } else {
- for (i = 0; i < cf->len; i++)
- cf->data[i] = msg->msg.rx.data[i];
- }
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
- }
-
- return;
-}
-
-static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
-{
- struct net_device_stats *stats = &priv->netdev->stats;
- struct net_device *netdev = priv->netdev;
- struct esd_tx_urb_context *context;
-
- if (!netif_device_present(netdev))
- return;
-
- context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)];
-
- if (!msg->msg.txdone.status) {
- stats->tx_packets++;
- stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index, NULL);
- } else {
- stats->tx_errors++;
- can_free_echo_skb(netdev, context->echo_index, NULL);
- }
-
- /* Release context */
- context->echo_index = MAX_TX_URBS;
- atomic_dec(&priv->active_tx_jobs);
-
- netif_wake_queue(netdev);
-}
-
-static void esd_usb2_read_bulk_callback(struct urb *urb)
-{
- struct esd_usb2 *dev = urb->context;
- int retval;
- int pos = 0;
- int i;
-
- switch (urb->status) {
- case 0: /* success */
- break;
-
- case -ENOENT:
- case -EPIPE:
- case -EPROTO:
- case -ESHUTDOWN:
- return;
-
- default:
- dev_info(dev->udev->dev.parent,
- "Rx URB aborted (%d)\n", urb->status);
- goto resubmit_urb;
- }
-
- while (pos < urb->actual_length) {
- struct esd_usb2_msg *msg;
-
- msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
-
- switch (msg->msg.hdr.cmd) {
- case CMD_CAN_RX:
- if (msg->msg.rx.net >= dev->net_count) {
- dev_err(dev->udev->dev.parent, "format error\n");
- break;
- }
-
- esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
- break;
-
- case CMD_CAN_TX:
- if (msg->msg.txdone.net >= dev->net_count) {
- dev_err(dev->udev->dev.parent, "format error\n");
- break;
- }
-
- esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
- msg);
- break;
- }
-
- pos += msg->msg.hdr.len << 2;
-
- if (pos > urb->actual_length) {
- dev_err(dev->udev->dev.parent, "format error\n");
- break;
- }
- }
-
-resubmit_urb:
- usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
- urb->transfer_buffer, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
-
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval == -ENODEV) {
- for (i = 0; i < dev->net_count; i++) {
- if (dev->nets[i])
- netif_device_detach(dev->nets[i]->netdev);
- }
- } else if (retval) {
- dev_err(dev->udev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
- }
-
- return;
-}
-
-/*
- * TX completion callback for the bulk OUT urb
- */
-static void esd_usb2_write_bulk_callback(struct urb *urb)
-{
- struct esd_tx_urb_context *context = urb->context;
- struct esd_usb2_net_priv *priv;
- struct net_device *netdev;
- size_t size = sizeof(struct esd_usb2_msg);
-
- WARN_ON(!context);
-
- priv = context->priv;
- netdev = priv->netdev;
-
- /* free up our allocated buffer */
- usb_free_coherent(urb->dev, size,
- urb->transfer_buffer, urb->transfer_dma);
-
- if (!netif_device_present(netdev))
- return;
-
- if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
-
- netif_trans_update(netdev);
-}
-
-static ssize_t firmware_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
-
- return sprintf(buf, "%d.%d.%d\n",
- (dev->version >> 12) & 0xf,
- (dev->version >> 8) & 0xf,
- dev->version & 0xff);
-}
-static DEVICE_ATTR_RO(firmware);
-
-static ssize_t hardware_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
-
- return sprintf(buf, "%d.%d.%d\n",
- (dev->version >> 28) & 0xf,
- (dev->version >> 24) & 0xf,
- (dev->version >> 16) & 0xff);
-}
-static DEVICE_ATTR_RO(hardware);
-
-static ssize_t nets_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
-
- return sprintf(buf, "%d", dev->net_count);
-}
-static DEVICE_ATTR_RO(nets);
-
-static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
-{
- int actual_length;
-
- return usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev, 2),
- msg,
- msg->msg.hdr.len << 2,
- &actual_length,
- 1000);
-}
-
-static int esd_usb2_wait_msg(struct esd_usb2 *dev,
- struct esd_usb2_msg *msg)
-{
- int actual_length;
-
- return usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev, 1),
- msg,
- sizeof(*msg),
- &actual_length,
- 1000);
-}
-
-static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
-{
- int i, err = 0;
-
- if (dev->rxinitdone)
- return 0;
-
- for (i = 0; i < MAX_RX_URBS; i++) {
- struct urb *urb = NULL;
- u8 *buf = NULL;
- dma_addr_t buf_dma;
-
- /* create a URB, and a buffer for it */
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- err = -ENOMEM;
- break;
- }
-
- buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &buf_dma);
- if (!buf) {
- dev_warn(dev->udev->dev.parent,
- "No memory left for USB buffer\n");
- err = -ENOMEM;
- goto freeurb;
- }
-
- urb->transfer_dma = buf_dma;
-
- usb_fill_bulk_urb(urb, dev->udev,
- usb_rcvbulkpipe(dev->udev, 1),
- buf, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(urb, &dev->rx_submitted);
-
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
- urb->transfer_dma);
- goto freeurb;
- }
-
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
-freeurb:
- /* Drop reference, USB core will take care of freeing it */
- usb_free_urb(urb);
- if (err)
- break;
- }
-
- /* Did we submit any URBs? */
- if (i == 0) {
- dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n");
- return err;
- }
-
- /* Warn if we couldn't submit all the RX URBs */
- if (i < MAX_RX_URBS) {
- dev_warn(dev->udev->dev.parent,
- "rx performance may be slow\n");
- }
-
- dev->rxinitdone = 1;
- return 0;
-}
-
-/*
- * Start interface
- */
-static int esd_usb2_start(struct esd_usb2_net_priv *priv)
-{
- struct esd_usb2 *dev = priv->usb2;
- struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg *msg;
- int err, i;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg) {
- err = -ENOMEM;
- goto out;
- }
-
- /*
- * Enable all IDs
- * The IDADD message takes up to 64 32-bit bitmasks (2048 bits).
- * Each bit represents one 11-bit CAN identifier. A set bit
- * enables reception of the corresponding CAN identifier. A cleared
- * bit disables this identifier. An additional bitmask value
- * following the CAN 2.0A bits is used to enable reception of
- * extended CAN frames. Only the LSB of this final mask is checked
- * for the complete 29 bit ID range. The IDADD message also allows
- * filter configuration for an ID subset. In this case you can add
- * the number of the starting bitmask (0..64) to the filter.option
- * field followed by only some bitmasks.
- */
- msg->msg.hdr.cmd = CMD_IDADD;
- msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg->msg.filter.net = priv->index;
- msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
- for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
- msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
- /* enable 29bit extended IDs */
- msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
-
- err = esd_usb2_send_msg(dev, msg);
- if (err)
- goto out;
-
- err = esd_usb2_setup_rx_urbs(dev);
- if (err)
- goto out;
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
-out:
- if (err == -ENODEV)
- netif_device_detach(netdev);
- if (err)
- netdev_err(netdev, "couldn't start device: %d\n", err);
-
- kfree(msg);
- return err;
-}
-
-static void unlink_all_urbs(struct esd_usb2 *dev)
-{
- struct esd_usb2_net_priv *priv;
- int i, j;
-
- usb_kill_anchored_urbs(&dev->rx_submitted);
-
- for (i = 0; i < MAX_RX_URBS; ++i)
- usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
- dev->rxbuf[i], dev->rxbuf_dma[i]);
-
- for (i = 0; i < dev->net_count; i++) {
- priv = dev->nets[i];
- if (priv) {
- usb_kill_anchored_urbs(&priv->tx_submitted);
- atomic_set(&priv->active_tx_jobs, 0);
-
- for (j = 0; j < MAX_TX_URBS; j++)
- priv->tx_contexts[j].echo_index = MAX_TX_URBS;
- }
- }
-}
-
-static int esd_usb2_open(struct net_device *netdev)
-{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- int err;
-
- /* common open */
- err = open_candev(netdev);
- if (err)
- return err;
-
- /* finally start device */
- err = esd_usb2_start(priv);
- if (err) {
- netdev_warn(netdev, "couldn't start device: %d\n", err);
- close_candev(netdev);
- return err;
- }
-
- netif_start_queue(netdev);
-
- return 0;
-}
-
-static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2 *dev = priv->usb2;
- struct esd_tx_urb_context *context = NULL;
- struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf = (struct can_frame *)skb->data;
- struct esd_usb2_msg *msg;
- struct urb *urb;
- u8 *buf;
- int i, err;
- int ret = NETDEV_TX_OK;
- size_t size = sizeof(struct esd_usb2_msg);
-
- if (can_dropped_invalid_skb(netdev, skb))
- return NETDEV_TX_OK;
-
- /* create a URB, and a buffer for it, and copy the data to the URB */
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- goto nourbmem;
- }
-
- buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
- &urb->transfer_dma);
- if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- goto nobufmem;
- }
-
- msg = (struct esd_usb2_msg *)buf;
-
- msg->msg.hdr.len = 3; /* minimal length */
- msg->msg.hdr.cmd = CMD_CAN_TX;
- msg->msg.tx.net = priv->index;
- msg->msg.tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
- msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
-
- if (cf->can_id & CAN_RTR_FLAG)
- msg->msg.tx.dlc |= ESD_RTR;
-
- if (cf->can_id & CAN_EFF_FLAG)
- msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);
-
- for (i = 0; i < cf->len; i++)
- msg->msg.tx.data[i] = cf->data[i];
-
- msg->msg.hdr.len += (cf->len + 3) >> 2;
-
- for (i = 0; i < MAX_TX_URBS; i++) {
- if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
- context = &priv->tx_contexts[i];
- break;
- }
- }
-
- /*
- * This should never happen: the queue is stopped once all TX
- * contexts are in use.
- */
- if (!context) {
- netdev_warn(netdev, "couldn't find free context\n");
- ret = NETDEV_TX_BUSY;
- goto releasebuf;
- }
-
- context->priv = priv;
- context->echo_index = i;
- context->len = cf->len;
-
- /* hnd must not be 0 - MSB is stripped in txdone handling */
- msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
-
- usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
- msg->msg.hdr.len << 2,
- esd_usb2_write_bulk_callback, context);
-
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- usb_anchor_urb(urb, &priv->tx_submitted);
-
- can_put_echo_skb(skb, netdev, context->echo_index, 0);
-
- atomic_inc(&priv->active_tx_jobs);
-
- /* Slow down tx path */
- if (atomic_read(&priv->active_tx_jobs) >= MAX_TX_URBS)
- netif_stop_queue(netdev);
-
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- can_free_echo_skb(netdev, context->echo_index, NULL);
-
- atomic_dec(&priv->active_tx_jobs);
- usb_unanchor_urb(urb);
-
- stats->tx_dropped++;
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
-
- goto releasebuf;
- }
-
- netif_trans_update(netdev);
-
- /*
- * Release our reference to this URB, the USB core will eventually free
- * it entirely.
- */
- usb_free_urb(urb);
-
- return NETDEV_TX_OK;
-
-releasebuf:
- usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
-
-nobufmem:
- usb_free_urb(urb);
-
-nourbmem:
- return ret;
-}
-
-static int esd_usb2_close(struct net_device *netdev)
-{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg *msg;
- int i;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- /* Disable all IDs (see esd_usb2_start()) */
- msg->msg.hdr.cmd = CMD_IDADD;
- msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg->msg.filter.net = priv->index;
- msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
- for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
- msg->msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
- netdev_err(netdev, "sending idadd message failed\n");
-
- /* set CAN controller to reset mode */
- msg->msg.hdr.len = 2;
- msg->msg.hdr.cmd = CMD_SETBAUD;
- msg->msg.setbaud.net = priv->index;
- msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
- netdev_err(netdev, "sending setbaud message failed\n");
-
- priv->can.state = CAN_STATE_STOPPED;
-
- netif_stop_queue(netdev);
-
- close_candev(netdev);
-
- kfree(msg);
-
- return 0;
-}
-
-static const struct net_device_ops esd_usb2_netdev_ops = {
- .ndo_open = esd_usb2_open,
- .ndo_stop = esd_usb2_close,
- .ndo_start_xmit = esd_usb2_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct can_bittiming_const esd_usb2_bittiming_const = {
- .name = "esd_usb2",
- .tseg1_min = ESD_USB2_TSEG1_MIN,
- .tseg1_max = ESD_USB2_TSEG1_MAX,
- .tseg2_min = ESD_USB2_TSEG2_MIN,
- .tseg2_max = ESD_USB2_TSEG2_MAX,
- .sjw_max = ESD_USB2_SJW_MAX,
- .brp_min = ESD_USB2_BRP_MIN,
- .brp_max = ESD_USB2_BRP_MAX,
- .brp_inc = ESD_USB2_BRP_INC,
-};
-
-static int esd_usb2_set_bittiming(struct net_device *netdev)
-{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg *msg;
- int err;
- u32 canbtr;
- int sjw_shift;
-
- canbtr = ESD_USB2_UBR;
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- canbtr |= ESD_USB2_LOM;
-
- canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
-
- if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
- USB_CANUSBM_PRODUCT_ID)
- sjw_shift = ESD_USBM_SJW_SHIFT;
- else
- sjw_shift = ESD_USB2_SJW_SHIFT;
-
- canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
- << sjw_shift;
- canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
- & (ESD_USB2_TSEG1_MAX - 1))
- << ESD_USB2_TSEG1_SHIFT;
- canbtr |= ((bt->phase_seg2 - 1) & (ESD_USB2_TSEG2_MAX - 1))
- << ESD_USB2_TSEG2_SHIFT;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- canbtr |= ESD_USB2_3_SAMPLES;
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->msg.hdr.len = 2;
- msg->msg.hdr.cmd = CMD_SETBAUD;
- msg->msg.setbaud.net = priv->index;
- msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(canbtr);
-
- netdev_info(netdev, "setting BTR=%#x\n", canbtr);
-
- err = esd_usb2_send_msg(priv->usb2, msg);
-
- kfree(msg);
- return err;
-}
-
-static int esd_usb2_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
-{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
-
- bec->txerr = priv->bec.txerr;
- bec->rxerr = priv->bec.rxerr;
-
- return 0;
-}
-
-static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
-{
- switch (mode) {
- case CAN_MODE_START:
- netif_wake_queue(netdev);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
-{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- struct esd_usb2_net_priv *priv;
- int err = 0;
- int i;
-
- netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
- if (!netdev) {
- dev_err(&intf->dev, "couldn't alloc candev\n");
- err = -ENOMEM;
- goto done;
- }
-
- priv = netdev_priv(netdev);
-
- init_usb_anchor(&priv->tx_submitted);
- atomic_set(&priv->active_tx_jobs, 0);
-
- for (i = 0; i < MAX_TX_URBS; i++)
- priv->tx_contexts[i].echo_index = MAX_TX_URBS;
-
- priv->usb2 = dev;
- priv->netdev = netdev;
- priv->index = index;
-
- priv->can.state = CAN_STATE_STOPPED;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC;
-
- if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
- USB_CANUSBM_PRODUCT_ID)
- priv->can.clock.freq = ESD_USBM_CAN_CLOCK;
- else {
- priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
- }
-
- priv->can.bittiming_const = &esd_usb2_bittiming_const;
- priv->can.do_set_bittiming = esd_usb2_set_bittiming;
- priv->can.do_set_mode = esd_usb2_set_mode;
- priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
-
- netdev->flags |= IFF_ECHO; /* we support local echo */
-
- netdev->netdev_ops = &esd_usb2_netdev_ops;
-
- SET_NETDEV_DEV(netdev, &intf->dev);
- netdev->dev_id = index;
-
- err = register_candev(netdev);
- if (err) {
- dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
- free_candev(netdev);
- err = -ENOMEM;
- goto done;
- }
-
- dev->nets[index] = priv;
- netdev_info(netdev, "device %s registered\n", netdev->name);
-
-done:
- return err;
-}
-
-/*
- * probe function for new USB2 devices
- *
- * check the version information and the number of available
- * CAN interfaces
- */
-static int esd_usb2_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct esd_usb2 *dev;
- struct esd_usb2_msg *msg;
- int i, err;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- err = -ENOMEM;
- goto done;
- }
-
- dev->udev = interface_to_usbdev(intf);
-
- init_usb_anchor(&dev->rx_submitted);
-
- usb_set_intfdata(intf, dev);
-
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg) {
- err = -ENOMEM;
- goto free_msg;
- }
-
- /* query number of CAN interfaces (nets) */
- msg->msg.hdr.cmd = CMD_VERSION;
- msg->msg.hdr.len = 2;
- msg->msg.version.rsvd = 0;
- msg->msg.version.flags = 0;
- msg->msg.version.drv_version = 0;
-
- err = esd_usb2_send_msg(dev, msg);
- if (err < 0) {
- dev_err(&intf->dev, "sending version message failed\n");
- goto free_msg;
- }
-
- err = esd_usb2_wait_msg(dev, msg);
- if (err < 0) {
- dev_err(&intf->dev, "no version message answer\n");
- goto free_msg;
- }
-
- dev->net_count = (int)msg->msg.version_reply.nets;
- dev->version = le32_to_cpu(msg->msg.version_reply.version);
-
- if (device_create_file(&intf->dev, &dev_attr_firmware))
- dev_err(&intf->dev,
- "Couldn't create device file for firmware\n");
-
- if (device_create_file(&intf->dev, &dev_attr_hardware))
- dev_err(&intf->dev,
- "Couldn't create device file for hardware\n");
-
- if (device_create_file(&intf->dev, &dev_attr_nets))
- dev_err(&intf->dev,
- "Couldn't create device file for nets\n");
-
- /* do per device probing */
- for (i = 0; i < dev->net_count; i++)
- esd_usb2_probe_one_net(intf, i);
-
-free_msg:
- kfree(msg);
- if (err)
- kfree(dev);
-done:
- return err;
-}
-
-/*
- * called by the usb core when the device is removed from the system
- */
-static void esd_usb2_disconnect(struct usb_interface *intf)
-{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
- struct net_device *netdev;
- int i;
-
- device_remove_file(&intf->dev, &dev_attr_firmware);
- device_remove_file(&intf->dev, &dev_attr_hardware);
- device_remove_file(&intf->dev, &dev_attr_nets);
-
- usb_set_intfdata(intf, NULL);
-
- if (dev) {
- for (i = 0; i < dev->net_count; i++) {
- if (dev->nets[i]) {
- netdev = dev->nets[i]->netdev;
- unregister_netdev(netdev);
- free_candev(netdev);
- }
- }
- unlink_all_urbs(dev);
- kfree(dev);
- }
-}
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver esd_usb2_driver = {
- .name = "esd_usb2",
- .probe = esd_usb2_probe,
- .disconnect = esd_usb2_disconnect,
- .id_table = esd_usb2_table,
-};
-
-module_usb_driver(esd_usb2_driver);
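Before moving on: the deleted esd_usb2_set_bittiming() packed the SocketCAN bit-timing parameters into a single 32-bit BTR word using the shift/mask constants from the top of the file. A stand-alone sketch of that packing; the constants are copied from the driver above, while the helper name and the sample timing values are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define ESD_USB2_UBR         0x80000000u
    #define ESD_USB2_BRP_MAX     1024
    #define ESD_USB2_SJW_MAX     4
    #define ESD_USB2_SJW_SHIFT   14
    #define ESD_USB2_TSEG1_MAX   16
    #define ESD_USB2_TSEG1_SHIFT 16
    #define ESD_USB2_TSEG2_MAX   8
    #define ESD_USB2_TSEG2_SHIFT 20

    /* Pack brp/sjw/tseg1/tseg2 the way esd_usb2_set_bittiming() did for
     * the CAN-USB/2; the CAN-USB/Micro only differs in the SJW shift.
     */
    static uint32_t esd_usb2_pack_btr(uint32_t brp, uint32_t sjw,
                                      uint32_t tseg1, uint32_t tseg2)
    {
            uint32_t canbtr = ESD_USB2_UBR; /* UBR flag, set unconditionally */

            canbtr |= (brp - 1) & (ESD_USB2_BRP_MAX - 1);
            canbtr |= ((sjw - 1) & (ESD_USB2_SJW_MAX - 1)) << ESD_USB2_SJW_SHIFT;
            canbtr |= ((tseg1 - 1) & (ESD_USB2_TSEG1_MAX - 1)) << ESD_USB2_TSEG1_SHIFT;
            canbtr |= ((tseg2 - 1) & (ESD_USB2_TSEG2_MAX - 1)) << ESD_USB2_TSEG2_SHIFT;
            return canbtr;
    }

    int main(void)
    {
            /* 500 kbit/s from the 60 MHz core clock: brp=6, 20 tq per bit,
             * tseg1 = prop_seg + phase_seg1 = 15, tseg2 = 4, sjw = 1.
             */
            printf("BTR=%#x\n", (unsigned int)esd_usb2_pack_btr(6, 1, 15, 4));
            return 0;
    }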
diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile
index a129b4aa0215..d6667ebe259f 100644
--- a/drivers/net/can/usb/etas_es58x/Makefile
+++ b/drivers/net/can/usb/etas_es58x/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o
-etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o
+etas_es58x-y = es58x_core.o es58x_devlink.o es581_4.o es58x_fd.o
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 14e360c9f2c9..1888ca1de7b6 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -6,11 +6,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <linux/unaligned.h>
#include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/units.h>
#include "es58x_core.h"
#include "es581_4.h"
@@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = {
.bittiming_const = &es581_4_bittiming_const,
.data_bittiming_const = NULL,
.tdc_const = NULL,
- .bitrate_max = 1 * CAN_MBPS,
- .clock = {.freq = 50 * CAN_MHZ},
+ .bitrate_max = 1 * MEGA /* BPS */,
+ .clock = {.freq = 50 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
.tx_start_of_frame = 0xAFAF,
.rx_start_of_frame = 0xFAFA,
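The hunk above swaps the CAN-specific CAN_MBPS/CAN_MHZ helpers for the generic SI prefix MEGA from <linux/units.h>; both expand to 1 000 000, so the computed values are unchanged. A trivial user-space check, with the constant re-declared locally since <linux/units.h> is kernel-only:

    #include <stdio.h>

    #define MEGA 1000000UL /* mirrors the kernel's <linux/units.h> */

    int main(void)
    {
            printf("bitrate_max = %lu bps, clock = %lu Hz\n",
                   1 * MEGA, 50 * MEGA);
            return 0;
    }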
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h
index 4bc60a6df697..667ecb77168c 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.h
+++ b/drivers/net/can/usb/etas_es58x/es581_4.h
@@ -192,7 +192,7 @@ struct es581_4_urb_cmd {
struct es581_4_rx_cmd_ret rx_cmd_ret;
__le64 timestamp;
u8 rx_cmd_ret_u8;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
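raw_msg lives inside a union, and standard C does not allow a flexible array member to be the sole member of a struct or to sit directly in a union; DECLARE_FLEX_ARRAY() from <linux/stddef.h> wraps the member in an anonymous struct with a zero-sized companion so the layout stays legal while [0]-style arrays are phased out. A reduced user-space rendition of the pattern (the union name is invented; the empty struct is a GNU C extension, as in the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* User-space copy of the kernel macro from <linux/stddef.h>. */
    #define DECLARE_FLEX_ARRAY(TYPE, NAME) \
            struct { struct { } __empty_##NAME; TYPE NAME[]; }

    union demo_urb_cmd {
            struct {
                    uint8_t cmd_type;
                    uint8_t cmd_id;
            } hdr;
            DECLARE_FLEX_ARRAY(uint8_t, raw_cmd);
    };

    int main(void)
    {
            union demo_urb_cmd cmd = { .hdr = { .cmd_type = 1, .cmd_id = 2 } };

            /* raw_cmd aliases the command bytes, as raw_msg does above. */
            printf("%u %u (sizeof=%zu)\n", cmd.raw_cmd[0], cmd.raw_cmd[1],
                   sizeof(cmd));
            return 0;
    }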
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 96a13c770e4a..f799233c2b72 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,25 +7,24 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
+#include <linux/unaligned.h>
+#include <linux/crc16.h>
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
-#include <linux/crc16.h>
-#include <asm/unaligned.h>
+#include <net/devlink.h>
#include "es58x_core.h"
-#define DRV_VERSION "1.00"
MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
-#define ES58X_MODULE_NAME "etas_es58x"
#define ES58X_VENDOR_ID 0x108C
#define ES581_4_PRODUCT_ID 0x0159
#define ES582_1_PRODUCT_ID 0x0168
@@ -59,11 +58,11 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
#define es58x_print_hex_dump(buf, len) \
print_hex_dump(KERN_DEBUG, \
- ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ KBUILD_MODNAME " " __stringify(buf) ": ", \
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
#define es58x_print_hex_dump_debug(buf, len) \
- print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
/* The last two bytes of an ES58X command is a CRC16. The first two
@@ -664,7 +663,7 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
struct can_device_stats *can_stats = &can->can_stats;
struct can_frame *cf = NULL;
struct sk_buff *skb;
- int ret;
+ int ret = 0;
if (!netif_running(netdev)) {
if (net_ratelimit())
@@ -823,8 +822,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
can->state = CAN_STATE_BUS_OFF;
can_bus_off(netdev);
ret = can->do_set_mode(netdev, CAN_MODE_STOP);
- if (ret)
- return ret;
}
break;
@@ -851,13 +848,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
break;
}
- /* driver/net/can/dev.c:can_restart() takes into account error
- * messages in the RX stats. Doing the same here for
- * consistency.
- */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += CAN_ERR_DLC;
-
if (cf) {
if (cf->data[1])
cf->can_id |= CAN_ERR_CRTL;
@@ -881,7 +871,7 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
ES58X_EVENT_BUSOFF, timestamp);
}
- return 0;
+ return ret;
}
/**
@@ -1471,10 +1461,6 @@ static void es58x_read_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_read_bulk_callback, es58x_dev);
-
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret == -ENODEV) {
for (i = 0; i < es58x_dev->num_can_ch; i++)
@@ -1608,7 +1594,8 @@ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
return NULL;
usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- buf, tx_buf_len, NULL, NULL);
+ buf, tx_buf_len, es58x_write_bulk_callback,
+ NULL);
return urb;
}
@@ -1641,9 +1628,7 @@ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
int ret;
es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_write_bulk_callback, netdev);
+ urb->context = netdev;
usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -1716,7 +1701,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
{
const struct device *dev = es58x_dev->dev;
const struct es58x_parameters *param = es58x_dev->param;
- size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe);
struct urb *urb;
u8 *buf;
int i;
@@ -1748,7 +1733,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
return ret;
}
- dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
return ret;
@@ -1796,7 +1781,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1814,12 +1799,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1854,7 +1840,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -1927,7 +1914,7 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
unsigned int frame_len;
int ret;
- if (can_dropped_invalid_skb(netdev, skb)) {
+ if (can_dev_dropped_skb(netdev, skb)) {
if (priv->tx_urb)
goto xmit_commit;
return NETDEV_TX_OK;
@@ -1988,7 +1975,13 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
- .ndo_start_xmit = es58x_start_xmit
+ .ndo_start_xmit = es58x_start_xmit,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
+};
+
+static const struct ethtool_ops es58x_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@@ -2047,10 +2040,16 @@ static int es58x_set_mode(struct net_device *netdev, enum can_mode mode)
* @es58x_dev: ES58X device.
* @priv: ES58X private parameters related to the network device.
* @channel_idx: Index of the network device.
+ *
+ * Return: zero on success, errno if devlink port could not be
+ * properly registered.
*/
-static void es58x_init_priv(struct es58x_device *es58x_dev,
- struct es58x_priv *priv, int channel_idx)
+static int es58x_init_priv(struct es58x_device *es58x_dev,
+ struct es58x_priv *priv, int channel_idx)
{
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ };
const struct es58x_parameters *param = es58x_dev->param;
struct can_priv *can = &priv->can;
@@ -2061,14 +2060,18 @@ static void es58x_init_priv(struct es58x_device *es58x_dev,
can->bittiming_const = param->bittiming_const;
if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
- can->data_bittiming_const = param->data_bittiming_const;
- can->tdc_const = param->tdc_const;
+ can->fd.data_bittiming_const = param->data_bittiming_const;
+ can->fd.tdc_const = param->tdc_const;
}
can->bitrate_max = param->bitrate_max;
can->clock = param->clock;
can->state = CAN_STATE_STOPPED;
can->ctrlmode_supported = param->ctrlmode_supported;
can->do_set_mode = es58x_set_mode;
+
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+ return devlink_port_register(priv_to_devlink(es58x_dev),
+ &priv->devlink_port, channel_idx);
}
/**
@@ -2092,19 +2095,31 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
}
SET_NETDEV_DEV(netdev, dev);
es58x_dev->netdev[channel_idx] = netdev;
- es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+ ret = es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+ if (ret)
+ goto free_candev;
+ SET_NETDEV_DEVLINK_PORT(netdev, &es58x_priv(netdev)->devlink_port);
netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
+ netdev->dev_port = channel_idx;
ret = register_candev(netdev);
if (ret)
- return ret;
+ goto devlink_port_unregister;
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0),
es58x_dev->param->dql_min_limit);
return ret;
+
+ devlink_port_unregister:
+ devlink_port_unregister(&es58x_priv(netdev)->devlink_port);
+ free_candev:
+ es58x_dev->netdev[channel_idx] = NULL;
+ free_candev(netdev);
+ return ret;
}
/**
@@ -2121,54 +2136,13 @@ static void es58x_free_netdevs(struct es58x_device *es58x_dev)
if (!netdev)
continue;
unregister_candev(netdev);
+ devlink_port_unregister(&es58x_priv(netdev)->devlink_port);
es58x_dev->netdev[i] = NULL;
free_candev(netdev);
}
}
/**
- * es58x_get_product_info() - Get the product information and print them.
- * @es58x_dev: ES58X device.
- *
- * Do a synchronous call to get the product information.
- *
- * Return: zero on success, errno when any error occurs.
- */
-static int es58x_get_product_info(struct es58x_device *es58x_dev)
-{
- struct usb_device *udev = es58x_dev->udev;
- const int es58x_prod_info_idx = 6;
- /* Empirical tests show a prod_info length of maximum 83,
- * below should be more than enough.
- */
- const size_t prod_info_len = 127;
- char *prod_info;
- int ret;
-
- prod_info = kmalloc(prod_info_len, GFP_KERNEL);
- if (!prod_info)
- return -ENOMEM;
-
- ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len);
- if (ret < 0) {
- dev_err(es58x_dev->dev,
- "%s: Could not read the product info: %pe\n",
- __func__, ERR_PTR(ret));
- goto out_free;
- }
- if (ret >= prod_info_len - 1) {
- dev_warn(es58x_dev->dev,
- "%s: Buffer is too small, result might be truncated\n",
- __func__);
- }
- dev_info(es58x_dev->dev, "Product info: %s\n", prod_info);
-
- out_free:
- kfree(prod_info);
- return ret < 0 ? ret : 0;
-}
-
-/**
* es58x_init_es58x_dev() - Initialize the ES58X device.
* @intf: USB interface.
* @driver_info: Quirks of the device.
@@ -2181,15 +2155,15 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
{
struct device *dev = &intf->dev;
struct es58x_device *es58x_dev;
+ struct devlink *devlink;
const struct es58x_parameters *param;
const struct es58x_operators *ops;
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *ep_in, *ep_out;
int ret;
- dev_info(dev,
- "Starting %s %s (Serial Number %s) driver version %s\n",
- udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+ dev_info(dev, "Starting %s %s (Serial Number %s)\n",
+ udev->manufacturer, udev->product, udev->serial);
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
@@ -2204,11 +2178,12 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
ops = &es581_4_ops;
}
- es58x_dev = devm_kzalloc(dev, es58x_sizeof_es58x_device(param),
- GFP_KERNEL);
- if (!es58x_dev)
+ devlink = devlink_alloc(&es58x_dl_ops, es58x_sizeof_es58x_device(param),
+ dev);
+ if (!devlink)
return ERR_PTR(-ENOMEM);
+ es58x_dev = devlink_priv(devlink);
es58x_dev->param = param;
es58x_dev->ops = ops;
es58x_dev->dev = dev;
@@ -2223,14 +2198,12 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
ep_in->bEndpointAddress);
es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
ep_out->bEndpointAddress);
- es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
return es58x_dev;
}
@@ -2247,25 +2220,25 @@ static int es58x_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct es58x_device *es58x_dev;
- int ch_idx, ret;
+ int ch_idx;
es58x_dev = es58x_init_es58x_dev(intf, id->driver_info);
if (IS_ERR(es58x_dev))
return PTR_ERR(es58x_dev);
- ret = es58x_get_product_info(es58x_dev);
- if (ret)
- return ret;
+ es58x_parse_product_info(es58x_dev);
+ devlink_register(priv_to_devlink(es58x_dev));
for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
- ret = es58x_init_netdev(es58x_dev, ch_idx);
+ int ret = es58x_init_netdev(es58x_dev, ch_idx);
+
if (ret) {
es58x_free_netdevs(es58x_dev);
return ret;
}
}
- return ret;
+ return 0;
}
/**
@@ -2282,13 +2255,15 @@ static void es58x_disconnect(struct usb_interface *intf)
dev_info(&intf->dev, "Disconnecting %s %s\n",
es58x_dev->udev->manufacturer, es58x_dev->udev->product);
+ devlink_unregister(priv_to_devlink(es58x_dev));
es58x_free_netdevs(es58x_dev);
es58x_free_urbs(es58x_dev);
+ devlink_free(priv_to_devlink(es58x_dev));
usb_set_intfdata(intf, NULL);
}
static struct usb_driver es58x_driver = {
- .name = ES58X_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = es58x_probe,
.disconnect = es58x_disconnect,
.id_table = es58x_id_table
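A note on the opened_channel_cnt conversion visible above: es58x_open() and es58x_stop() are the ndo_open()/ndo_stop() callbacks, which the networking core only ever invokes with the RTNL held, so the atomic_t could be replaced by a plain u8 counter. A user-space analogue of the pattern, with a mutex standing in for rtnl_lock (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl_stand_in = PTHREAD_MUTEX_INITIALIZER;
    static unsigned char opened_channel_cnt; /* u8 in the driver */

    static void demo_open(void)
    {
            pthread_mutex_lock(&rtnl_stand_in); /* networking core does this */
            if (!opened_channel_cnt)
                    puts("first channel opened: allocate rx URBs");
            opened_channel_cnt++;
            pthread_mutex_unlock(&rtnl_stand_in);
    }

    static void demo_stop(void)
    {
            pthread_mutex_lock(&rtnl_stand_in);
            if (!--opened_channel_cnt)
                    puts("last channel closed: free rx URBs");
            pthread_mutex_unlock(&rtnl_stand_in);
    }

    int main(void)
    {
            demo_open();
            demo_open(); /* second channel: URBs already allocated */
            demo_stop();
            demo_stop();
            return 0;
    }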
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..2e183bdeedd7 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -6,17 +6,18 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#ifndef __ES58X_COMMON_H__
#define __ES58X_COMMON_H__
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <net/devlink.h>
#include "es581_4.h"
#include "es58x_fd.h"
@@ -222,7 +223,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
/**
@@ -230,6 +231,7 @@ union es58x_urb_cmd {
* @can: struct can_priv must be the first member (Socket CAN relies
* on the fact that function netdev_priv() returns a pointer to
* a struct can_priv).
+ * @devlink_port: devlink port instance for the network interface.
* @es58x_dev: pointer to the corresponding ES58X device.
* @tx_urb: Used as a buffer to concatenate the TX messages and to do
* a bulk send. Please refer to es58x_start_xmit() for more
@@ -255,6 +257,7 @@ union es58x_urb_cmd {
*/
struct es58x_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct es58x_device *es58x_dev;
struct urb *tx_urb;
@@ -357,6 +360,39 @@ struct es58x_operators {
};
/**
+ * struct es58x_sw_version - Version number of the firmware or the
+ * bootloader.
+ * @major: Version major number, represented on two digits.
+ * @minor: Version minor number, represented on two digits.
+ * @revision: Version revision number, represented on two digits.
+ *
+ * The firmware and the bootloader share the same format: "xx.xx.xx"
+ * where 'x' is a digit. Both can be retrieved from the product
+ * information string.
+ */
+struct es58x_sw_version {
+ u8 major;
+ u8 minor;
+ u8 revision;
+};
+
+/**
+ * struct es58x_hw_revision - Hardware revision number.
+ * @letter: Revision letter, an alphanumeric character.
+ * @major: Version major number, represented on three digits.
+ * @minor: Version minor number, represented on three digits.
+ *
+ * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+ * an alphanumeric character and 'x' a digit. It can be retrieved from
+ * the product information string.
+ */
+struct es58x_hw_revision {
+ char letter;
+ u16 major;
+ u16 minor;
+};
+
+/**
* struct es58x_device - All information specific to an ES58X device.
* @dev: Device information.
* @udev: USB device information.
@@ -373,8 +409,9 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
+ * @firmware_version: The firmware version number.
+ * @bootloader_version: The bootloader version number.
+ * @hardware_revision: The hardware revision number.
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -382,8 +419,11 @@ struct es58x_operators {
* @timestamps: a temporary buffer to store the time stamps before
* feeding them to es58x_can_get_echo_skb(). Can only be used
* in RX branches.
- * @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_stop()) guarantee that the network
+ * stack big lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
@@ -399,22 +439,25 @@ struct es58x_device {
const struct es58x_parameters *param;
const struct es58x_operators *ops;
- int rx_pipe;
- int tx_pipe;
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
struct usb_anchor rx_urbs;
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
+
+ struct es58x_sw_version firmware_version;
+ struct es58x_sw_version bootloader_version;
+ struct es58x_hw_revision hardware_revision;
u64 ktime_req_ns;
s64 realtime_diff_ns;
u64 timestamps[ES58X_ECHO_BULK_MAX];
- u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
@@ -674,6 +717,7 @@ static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb)
return es58x_flags;
}
+/* es58x_core.c. */
int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx,
u64 *tstamps, unsigned int pkts);
int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries,
@@ -691,9 +735,15 @@ int es58x_rx_cmd_ret_u32(struct net_device *netdev,
int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id,
const void *msg, u16 cmd_len, int channel_idx);
+/* es58x_devlink.c. */
+void es58x_parse_product_info(struct es58x_device *es58x_dev);
+extern const struct devlink_ops es58x_dl_ops;
+
+/* es581_4.c. */
extern const struct es58x_parameters es581_4_param;
extern const struct es58x_operators es581_4_ops;
+/* es58x_fd.c. */
extern const struct es58x_parameters es58x_fd_param;
extern const struct es58x_operators es58x_fd_ops;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
new file mode 100644
index 000000000000..0d155eb1b9e9
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_devlink.c: report the product information using devlink.
+ *
+ * Copyright (c) 2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <net/devlink.h>
+
+#include "es58x_core.h"
+
+/* USB descriptor index containing the product information string. */
+#define ES58X_PROD_INFO_IDX 6
+
+/**
+ * es58x_parse_sw_version() - Extract boot loader or firmware version.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ * @prefix: Select which information should be parsed. Set it to "FW"
+ * to parse the firmware version or to "BL" to parse the
+ * bootloader version.
+ *
+ * The @prod_info string contains the firmware and the bootloader
+ * version numbers, each prefixed by a magic string and concatenated with
+ * other numbers. Depending on the device, the firmware (bootloader)
+ * format is either "FW_Vxx.xx.xx" ("BL_Vxx.xx.xx") or "FW:xx.xx.xx"
+ * ("BL:xx.xx.xx") where 'x' represents a digit. @prod_info must
+ * contains the common part of those prefixes: "FW" or "BL".
+ *
+ * Parse @prod_info and store the version number in
+ * &es58x_dev.firmware_version or &es58x_dev.bootloader_version
+ * according to @prefix value.
+ *
+ * Return: zero on success, -EINVAL if @prefix contains an invalid
+ * value and -EBADMSG if @prod_info could not be parsed.
+ */
+static int es58x_parse_sw_version(struct es58x_device *es58x_dev,
+ const char *prod_info, const char *prefix)
+{
+ struct es58x_sw_version *version;
+ int major, minor, revision;
+
+ if (!strcmp(prefix, "FW"))
+ version = &es58x_dev->firmware_version;
+ else if (!strcmp(prefix, "BL"))
+ version = &es58x_dev->bootloader_version;
+ else
+ return -EINVAL;
+
+ /* Go to prefix */
+ prod_info = strstr(prod_info, prefix);
+ if (!prod_info)
+ return -EBADMSG;
+ /* Go to beginning of the version number */
+ while (!isdigit(*prod_info)) {
+ prod_info++;
+ if (!*prod_info)
+ return -EBADMSG;
+ }
+
+ if (sscanf(prod_info, "%2u.%2u.%2u", &major, &minor, &revision) != 3)
+ return -EBADMSG;
+
+ version->major = major;
+ version->minor = minor;
+ version->revision = revision;
+
+ return 0;
+}
+
+/**
+ * es58x_parse_hw_rev() - Extract hardware revision number.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ *
+ * @prod_info contains the hardware revision prefixed by a magic
+ * string and concatenated with other numbers. Depending on
+ * the device, the hardware revision format is either
+ * "HW_VER:axxx/xxx" or "HR:axxx/xxx" where 'a' represents a letter
+ * and 'x' a digit.
+ *
+ * Parse @prod_info and store the hardware revision number in
+ * &es58x_dev.hardware_revision.
+ *
+ * Return: zero on success, -EBADMSG if @prod_info could not be
+ * parsed.
+ */
+static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+ const char *prod_info)
+{
+ char letter;
+ int major, minor;
+
+ /* The only occurrence of 'H' is in the hardware revision prefix. */
+ prod_info = strchr(prod_info, 'H');
+ if (!prod_info)
+ return -EBADMSG;
+ /* Go to beginning of the hardware revision */
+ prod_info = strchr(prod_info, ':');
+ if (!prod_info)
+ return -EBADMSG;
+ prod_info++;
+
+ if (sscanf(prod_info, "%c%3u/%3u", &letter, &major, &minor) != 3)
+ return -EBADMSG;
+
+ es58x_dev->hardware_revision.letter = letter;
+ es58x_dev->hardware_revision.major = major;
+ es58x_dev->hardware_revision.minor = minor;
+
+ return 0;
+}
+
+/**
+ * es58x_parse_product_info() - Parse the ES58x product information
+ * string.
+ * @es58x_dev: ES58X device.
+ *
+ * Retrieve the product information string and parse it to extract the
+ * firmware version, the bootloader version and the hardware
+ * revision.
+ *
+ * If the function fails, set the version or revision to an invalid
+ * value and emit an informational message. Continue probing because the
+ * product information is not critical for the driver to operate.
+ */
+void es58x_parse_product_info(struct es58x_device *es58x_dev)
+{
+ static const struct es58x_sw_version sw_version_not_set = {
+ .major = -1,
+ .minor = -1,
+ .revision = -1,
+ };
+ static const struct es58x_hw_revision hw_revision_not_set = {
+ .letter = '\0',
+ .major = -1,
+ .minor = -1,
+ };
+ char *prod_info;
+
+ es58x_dev->firmware_version = sw_version_not_set;
+ es58x_dev->bootloader_version = sw_version_not_set;
+ es58x_dev->hardware_revision = hw_revision_not_set;
+
+ prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
+ if (!prod_info) {
+ dev_warn(es58x_dev->dev,
+ "could not retrieve the product info string\n");
+ return;
+ }
+
+ if (es58x_parse_sw_version(es58x_dev, prod_info, "FW") ||
+ es58x_parse_sw_version(es58x_dev, prod_info, "BL") ||
+ es58x_parse_hw_rev(es58x_dev, prod_info))
+ dev_info(es58x_dev->dev,
+ "could not parse product info: '%s'\n", prod_info);
+
+ kfree(prod_info);
+}
+
+/**
+ * es58x_sw_version_is_valid() - Check if the version is a valid number.
+ * @sw_ver: Version number of either the firmware or the bootloader.
+ *
+ * If any of the software version sub-numbers do not fit in two
+ * digits, the version is invalid, most probably because the product
+ * string could not be parsed.
+ *
+ * Return: @true if the software version is valid, @false otherwise.
+ */
+static inline bool es58x_sw_version_is_valid(struct es58x_sw_version *sw_ver)
+{
+ return sw_ver->major < 100 && sw_ver->minor < 100 &&
+ sw_ver->revision < 100;
+}
+
+/**
+ * es58x_hw_revision_is_valid() - Check if the revision is a valid number.
+ * @hw_rev: Revision number of the hardware.
+ *
+ * If &es58x_hw_revision.letter is not an alphanumeric character or if
+ * any of the hardware revision sub-numbers do not fit in three
+ * digits, the revision is invalid, most probably because the product
+ * string could not be parsed.
+ *
+ * Return: @true if the hardware revision is valid, @false otherwise.
+ */
+static inline bool es58x_hw_revision_is_valid(struct es58x_hw_revision *hw_rev)
+{
+ return isalnum(hw_rev->letter) && hw_rev->major < 1000 &&
+ hw_rev->minor < 1000;
+}
+
+/**
+ * es58x_devlink_info_get() - Report the product information.
+ * @devlink: Devlink.
+ * @req: skb wrapper in which to put the requested information.
+ * @extack: Unused.
+ *
+ * Report the firmware version, the bootloader version, the hardware
+ * revision and the serial number through netlink.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct es58x_device *es58x_dev = devlink_priv(devlink);
+ struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version;
+ struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version;
+ struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision;
+ char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ int ret = 0;
+
+ if (es58x_sw_version_is_valid(fw_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ fw_ver->major, fw_ver->minor, fw_ver->revision);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (es58x_sw_version_is_valid(bl_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ bl_ver->major, bl_ver->minor, bl_ver->revision);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (es58x_hw_revision_is_valid(hw_rev)) {
+ snprintf(buf, sizeof(buf), "%c%03u/%03u",
+ hw_rev->letter, hw_rev->major, hw_rev->minor);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
+}
+
+const struct devlink_ops es58x_dl_ops = {
+ .info_get = es58x_devlink_info_get,
+};
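Once the devlink ops are registered, the information filled in by es58x_devlink_info_get() is visible from user space through the standard devlink tool; an illustrative session (the device handle and values are made up, the generic version keys "fw", "fw.bootloader" and "board.rev" follow from the DEVLINK_INFO_VERSION_GENERIC_* constants used above):

    $ devlink dev info usb/1-1.4
    usb/1-1.4:
      serial_number 4C0A2F11
      versions:
          fixed:
            board.rev C002/014
          running:
            fw 03.06.01
            fw.bootloader 01.02.00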
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 4f0cae29f4d8..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -8,11 +8,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <linux/unaligned.h>
#include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/units.h>
#include "es58x_core.h"
#include "es58x_fd.h"
@@ -68,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev,
int i, num_element;
u32 rcv_packet_idx;
- const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+ const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1,
+ BITS_PER_TYPE(echo_msg->packet_idx));
num_element = es58x_msg_num_element(es58x_dev->dev,
es58x_fd_urb_cmd->echo_msg,
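The rewritten mask is equivalent to the old one but self-describing; a short sketch of the arithmetic, assuming packet_idx is a u8 (the field's actual type is defined in the ES58x headers):

    /* Select the bits of rcv_packet_idx above the width of packet_idx:
     *   old: GENMASK(31, sizeof(u8) * 8)              == GENMASK(31, 8)
     *   new: GENMASK(BITS_PER_TYPE(u32) - 1,
     *                BITS_PER_TYPE(u8))               == GENMASK(31, 8)
     * i.e. 0xffffff00, without hard-coding the literal 31.
     */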
@@ -171,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
const struct es58x_fd_rx_event_msg *rx_event_msg;
int ret;
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
if (ret)
return ret;
- rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
rx_event_msg->event_code,
get_unaligned_le64(&rx_event_msg->timestamp));
@@ -426,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
if (tx_conf_msg.canfd_enabled) {
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
- &priv->can.data_bittiming);
+ &priv->can.fd.data_bittiming);
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
- tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
- tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
}
conf_len = ES58X_FD_CANFD_CONF_LEN;
@@ -522,8 +523,8 @@ const struct es58x_parameters es58x_fd_param = {
* Mbps work in an optimal environment but are not recommended
* for production environment.
*/
- .bitrate_max = 8 * CAN_MBPS,
- .clock = {.freq = 80 * CAN_MHZ},
+ .bitrate_max = 8 * MEGA /* BPS */,
+ .clock = {.freq = 80 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
index a191891b8777..c4b19a6a33ae 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -219,7 +219,7 @@ struct es58x_fd_urb_cmd {
struct es58x_fd_tx_ack_msg tx_ack_msg;
__le64 timestamp;
__le32 rx_cmd_ret_le32;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
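DECLARE_FLEX_ARRAY() is needed here because a bare flexible array member (`u8 raw_msg[];`) is not allowed directly inside a union; the helper from <linux/stddef.h> wraps it in an anonymous struct, roughly:

    /* Rough expansion of DECLARE_FLEX_ARRAY(u8, raw_msg). */
    struct {
            struct { } __empty_raw_msg;
            u8 raw_msg[];
    };

Unlike the deprecated `u8 raw_msg[0]` form, this keeps the flexible array visible to the compiler's array-bounds checking.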
diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c
new file mode 100644
index 000000000000..efe61ece79ea
--- /dev/null
+++ b/drivers/net/can/usb/f81604.c
@@ -0,0 +1,1204 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Fintek F81604 USB-to-2CAN controller driver.
+ *
+ * Copyright (C) 2023 Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw>
+ */
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/sja1000.h>
+
+#include <linux/unaligned.h>
+
+/* vendor and product id */
+#define F81604_VENDOR_ID 0x2c42
+#define F81604_PRODUCT_ID 0x1709
+#define F81604_CAN_CLOCK (12 * MEGA)
+#define F81604_MAX_DEV 2
+#define F81604_SET_DEVICE_RETRY 10
+
+#define F81604_USB_TIMEOUT 2000
+#define F81604_SET_GET_REGISTER 0xA0
+#define F81604_PORT_OFFSET 0x1000
+#define F81604_MAX_RX_URBS 4
+
+#define F81604_CMD_DATA 0x00
+
+#define F81604_DLC_LEN_MASK GENMASK(3, 0)
+#define F81604_DLC_EFF_BIT BIT(7)
+#define F81604_DLC_RTR_BIT BIT(6)
+
+#define F81604_SFF_SHIFT 5
+#define F81604_EFF_SHIFT 3
+
+#define F81604_BRP_MASK GENMASK(5, 0)
+#define F81604_SJW_MASK GENMASK(7, 6)
+
+#define F81604_SEG1_MASK GENMASK(3, 0)
+#define F81604_SEG2_MASK GENMASK(6, 4)
+
+#define F81604_CLEAR_ALC 0
+#define F81604_CLEAR_ECC 1
+#define F81604_CLEAR_OVERRUN 2
+
+/* device setting */
+#define F81604_CTRL_MODE_REG 0x80
+#define F81604_TX_ONESHOT (0x03 << 3)
+#define F81604_TX_NORMAL (0x01 << 3)
+#define F81604_RX_AUTO_RELEASE_BUF BIT(1)
+#define F81604_INT_WHEN_CHANGE BIT(0)
+
+#define F81604_TERMINATOR_REG 0x105
+#define F81604_CAN0_TERM BIT(2)
+#define F81604_CAN1_TERM BIT(3)
+
+#define F81604_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define F81604_TERMINATION_ENABLED 120
+
+/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
+#define F81604_SJA1000_MOD 0x00
+#define F81604_SJA1000_CMR 0x01
+#define F81604_SJA1000_IR 0x03
+#define F81604_SJA1000_IER 0x04
+#define F81604_SJA1000_ALC 0x0B
+#define F81604_SJA1000_ECC 0x0C
+#define F81604_SJA1000_RXERR 0x0E
+#define F81604_SJA1000_TXERR 0x0F
+#define F81604_SJA1000_ACCC0 0x10
+#define F81604_SJA1000_ACCM0 0x14
+#define F81604_MAX_FILTER_CNT 4
+
+/* Common registers - manual section 6.5 */
+#define F81604_SJA1000_BTR0 0x06
+#define F81604_SJA1000_BTR1 0x07
+#define F81604_SJA1000_BTR1_SAMPLE_TRIPLE BIT(7)
+#define F81604_SJA1000_OCR 0x08
+#define F81604_SJA1000_CDR 0x1F
+
+/* mode register */
+#define F81604_SJA1000_MOD_RM 0x01
+#define F81604_SJA1000_MOD_LOM 0x02
+#define F81604_SJA1000_MOD_STM 0x04
+
+/* commands */
+#define F81604_SJA1000_CMD_CDO 0x08
+
+/* interrupt sources */
+#define F81604_SJA1000_IRQ_BEI 0x80
+#define F81604_SJA1000_IRQ_ALI 0x40
+#define F81604_SJA1000_IRQ_EPI 0x20
+#define F81604_SJA1000_IRQ_DOI 0x08
+#define F81604_SJA1000_IRQ_EI 0x04
+#define F81604_SJA1000_IRQ_TI 0x02
+#define F81604_SJA1000_IRQ_RI 0x01
+#define F81604_SJA1000_IRQ_ALL 0xFF
+#define F81604_SJA1000_IRQ_OFF 0x00
+
+/* status register content */
+#define F81604_SJA1000_SR_BS 0x80
+#define F81604_SJA1000_SR_ES 0x40
+#define F81604_SJA1000_SR_TCS 0x08
+
+/* ECC register */
+#define F81604_SJA1000_ECC_SEG 0x1F
+#define F81604_SJA1000_ECC_DIR 0x20
+#define F81604_SJA1000_ECC_BIT 0x00
+#define F81604_SJA1000_ECC_FORM 0x40
+#define F81604_SJA1000_ECC_STUFF 0x80
+#define F81604_SJA1000_ECC_MASK 0xc0
+
+/* ALC register */
+#define F81604_SJA1000_ALC_MASK 0x1f
+
+/* table of devices that work with this driver */
+static const struct usb_device_id f81604_table[] = {
+ { USB_DEVICE(F81604_VENDOR_ID, F81604_PRODUCT_ID) },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, f81604_table);
+
+static const struct ethtool_ops f81604_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const u16 f81604_termination[] = { F81604_TERMINATION_DISABLED,
+ F81604_TERMINATION_ENABLED };
+
+struct f81604_priv {
+ struct net_device *netdev[F81604_MAX_DEV];
+};
+
+struct f81604_port_priv {
+ struct can_priv can;
+ struct net_device *netdev;
+ struct sk_buff *echo_skb;
+
+ unsigned long clear_flags;
+ struct work_struct clear_reg_work;
+
+ struct usb_device *dev;
+ struct usb_interface *intf;
+
+ struct usb_anchor urbs_anchor;
+};
+
+/* Interrupt endpoint data format:
+ * Byte 0: Status register.
+ * Byte 1: Interrupt register.
+ * Byte 2: Interrupt enable register.
+ * Byte 3: Arbitration lost capture (ALC) register.
+ * Byte 4: Error code capture (ECC) register.
+ * Byte 5: Error warning limit register.
+ * Byte 6: RX error counter register.
+ * Byte 7: TX error counter register.
+ * Byte 8: Reserved.
+ */
+struct f81604_int_data {
+ u8 sr;
+ u8 isrc;
+ u8 ier;
+ u8 alc;
+ u8 ecc;
+ u8 ewlr;
+ u8 rxerr;
+ u8 txerr;
+ u8 val;
+} __packed __aligned(4);
+
+struct f81604_sff {
+ __be16 id;
+ u8 data[CAN_MAX_DLEN];
+} __packed __aligned(2);
+
+struct f81604_eff {
+ __be32 id;
+ u8 data[CAN_MAX_DLEN];
+} __packed __aligned(2);
+
+struct f81604_can_frame {
+ u8 cmd;
+
+ /* Per the F81604 DLC definition:
+ * bits 3~0: data length (0~8)
+ * bit 6: RTR flag
+ * bit 7: EFF frame
+ */
+ u8 dlc;
+
+ union {
+ struct f81604_sff sff;
+ struct f81604_eff eff;
+ };
+} __packed __aligned(2);
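The SFF/EFF shifts defined above mean CAN IDs travel left-aligned in their big-endian fields, which is exactly how f81604_process_rx_packet() and f81604_start_xmit() pack and unpack them; a worked example:

    /* Standard ID 0x123: 11 bits left-aligned in a __be16:
     *   0x123 << F81604_SFF_SHIFT (5)        == 0x2460
     * Extended ID 0x1abcde42: 29 bits left-aligned in a __be32:
     *   0x1abcde42 << F81604_EFF_SHIFT (3)   == 0xd5e6f210
     */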
+
+static const u8 bulk_in_addr[F81604_MAX_DEV] = { 2, 4 };
+static const u8 bulk_out_addr[F81604_MAX_DEV] = { 1, 3 };
+static const u8 int_in_addr[F81604_MAX_DEV] = { 1, 3 };
+
+static int f81604_write(struct usb_device *dev, u16 reg, u8 data)
+{
+ int ret;
+
+ ret = usb_control_msg_send(dev, 0, F81604_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT, 0, reg,
+ &data, sizeof(data), F81604_USB_TIMEOUT,
+ GFP_KERNEL);
+ if (ret)
+ dev_err(&dev->dev, "%s: reg: %x data: %x failed: %pe\n",
+ __func__, reg, data, ERR_PTR(ret));
+
+ return ret;
+}
+
+static int f81604_read(struct usb_device *dev, u16 reg, u8 *data)
+{
+ int ret;
+
+ ret = usb_control_msg_recv(dev, 0, F81604_SET_GET_REGISTER,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, reg, data,
+ sizeof(*data), F81604_USB_TIMEOUT,
+ GFP_KERNEL);
+
+ if (ret < 0)
+ dev_err(&dev->dev, "%s: reg: %x failed: %pe\n", __func__, reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static int f81604_update_bits(struct usb_device *dev, u16 reg, u8 mask,
+ u8 data)
+{
+ int ret;
+ u8 tmp;
+
+ ret = f81604_read(dev, reg, &tmp);
+ if (ret)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= (mask & data);
+
+ return f81604_write(dev, reg, tmp);
+}
+
+static int f81604_sja1000_write(struct f81604_port_priv *priv, u16 reg,
+ u8 data)
+{
+ int port = priv->netdev->dev_port;
+ int real_reg;
+
+ real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET;
+ return f81604_write(priv->dev, real_reg, data);
+}
+
+static int f81604_sja1000_read(struct f81604_port_priv *priv, u16 reg,
+ u8 *data)
+{
+ int port = priv->netdev->dev_port;
+ int real_reg;
+
+ real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET;
+ return f81604_read(priv->dev, real_reg, data);
+}
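Both accessors map an SJA1000 register into the per-port register window; the math as implemented above works out to:

    /* real_reg = reg + 0x1000 * port + 0x1000, so:
     *   port 0, F81604_SJA1000_MOD  (0x00) -> 0x1000
     *   port 1, F81604_SJA1000_BTR0 (0x06) -> 0x2006
     */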
+
+static int f81604_set_reset_mode(struct f81604_port_priv *priv)
+{
+ int ret, i;
+ u8 tmp;
+
+ /* disable interrupts */
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_IER,
+ F81604_SJA1000_IRQ_OFF);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) {
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp);
+ if (ret)
+ return ret;
+
+ /* check reset bit */
+ if (tmp & F81604_SJA1000_MOD_RM) {
+ priv->can.state = CAN_STATE_STOPPED;
+ return 0;
+ }
+
+ /* reset chip */
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD,
+ F81604_SJA1000_MOD_RM);
+ if (ret)
+ return ret;
+ }
+
+ return -EPERM;
+}
+
+static int f81604_set_normal_mode(struct f81604_port_priv *priv)
+{
+ u8 tmp, ier = 0;
+ u8 mod_reg = 0;
+ int ret, i;
+
+ for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) {
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp);
+ if (ret)
+ return ret;
+
+ /* check reset bit */
+ if ((tmp & F81604_SJA1000_MOD_RM) == 0) {
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* enable interrupts, RI handled by bulk-in */
+ ier = F81604_SJA1000_IRQ_ALL & ~F81604_SJA1000_IRQ_RI;
+ if (!(priv->can.ctrlmode &
+ CAN_CTRLMODE_BERR_REPORTING))
+ ier &= ~F81604_SJA1000_IRQ_BEI;
+
+ return f81604_sja1000_write(priv, F81604_SJA1000_IER,
+ ier);
+ }
+
+ /* set chip to normal mode */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mod_reg |= F81604_SJA1000_MOD_LOM;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ mod_reg |= F81604_SJA1000_MOD_STM;
+
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, mod_reg);
+ if (ret)
+ return ret;
+ }
+
+ return -EPERM;
+}
+
+static int f81604_chipset_init(struct f81604_port_priv *priv)
+{
+ int i, ret;
+
+ /* set clock divider and output control register */
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_CDR,
+ CDR_CBP | CDR_PELICAN);
+ if (ret)
+ return ret;
+
+ /* set acceptance filter (accept all) */
+ for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) {
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCC0 + i, 0);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) {
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCM0 + i,
+ 0xFF);
+ if (ret)
+ return ret;
+ }
+
+ return f81604_sja1000_write(priv, F81604_SJA1000_OCR,
+ OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL |
+ OCR_MODE_NORMAL);
+}
+
+static void f81604_process_rx_packet(struct net_device *netdev,
+ struct f81604_can_frame *frame)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ if (frame->cmd != F81604_CMD_DATA)
+ return;
+
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->len = can_cc_dlc2len(frame->dlc & F81604_DLC_LEN_MASK);
+
+ if (frame->dlc & F81604_DLC_EFF_BIT) {
+ cf->can_id = get_unaligned_be32(&frame->eff.id) >>
+ F81604_EFF_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (!(frame->dlc & F81604_DLC_RTR_BIT))
+ memcpy(cf->data, frame->eff.data, cf->len);
+ } else {
+ cf->can_id = get_unaligned_be16(&frame->sff.id) >>
+ F81604_SFF_SHIFT;
+
+ if (!(frame->dlc & F81604_DLC_RTR_BIT))
+ memcpy(cf->data, frame->sff.data, cf->len);
+ }
+
+ if (frame->dlc & F81604_DLC_RTR_BIT)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ stats->rx_bytes += cf->len;
+
+ stats->rx_packets++;
+ netif_rx(skb);
+}
+
+static void f81604_read_bulk_callback(struct urb *urb)
+{
+ struct f81604_can_frame *frame = urb->transfer_buffer;
+ struct net_device *netdev = urb->context;
+ int ret;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "%s: URB aborted %pe\n", __func__,
+ ERR_PTR(urb->status));
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ goto resubmit_urb;
+ }
+
+ if (urb->actual_length != sizeof(*frame)) {
+ netdev_warn(netdev, "URB length %u not equal to %zu\n",
+ urb->actual_length, sizeof(*frame));
+ goto resubmit_urb;
+ }
+
+ f81604_process_rx_packet(netdev, frame);
+
+resubmit_urb:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ else if (ret)
+ netdev_err(netdev,
+ "%s: failed to resubmit read bulk urb: %pe\n",
+ __func__, ERR_PTR(ret));
+}
+
+static void f81604_handle_tx(struct f81604_port_priv *priv,
+ struct f81604_int_data *data)
+{
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+
+ /* transmission buffer released */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT &&
+ !(data->sr & F81604_SJA1000_SR_TCS)) {
+ stats->tx_errors++;
+ can_free_echo_skb(netdev, 0, NULL);
+ } else {
+ /* transmission complete */
+ stats->tx_bytes += can_get_echo_skb(netdev, 0, NULL);
+ stats->tx_packets++;
+ }
+
+ netif_wake_queue(netdev);
+}
+
+static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv,
+ struct f81604_int_data *data)
+{
+ enum can_state can_state = priv->can.state;
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* Note: the ALC/ECC registers are not cleared automatically here;
+ * they must be cleared by a dedicated register access (via
+ * clear_reg_work).
+ */
+
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = data->txerr;
+ cf->data[7] = data->rxerr;
+ }
+
+ if (data->isrc & F81604_SJA1000_IRQ_DOI) {
+ /* data overrun interrupt */
+ netdev_dbg(netdev, "data overrun interrupt\n");
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ set_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags);
+ }
+
+ if (data->isrc & F81604_SJA1000_IRQ_EI) {
+ /* error warning interrupt */
+ netdev_dbg(netdev, "error warning interrupt\n");
+
+ if (data->sr & F81604_SJA1000_SR_BS)
+ can_state = CAN_STATE_BUS_OFF;
+ else if (data->sr & F81604_SJA1000_SR_ES)
+ can_state = CAN_STATE_ERROR_WARNING;
+ else
+ can_state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ if (data->isrc & F81604_SJA1000_IRQ_BEI) {
+ /* bus error interrupt */
+ netdev_dbg(netdev, "bus error interrupt\n");
+
+ priv->can.can_stats.bus_error++;
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ /* set error type */
+ switch (data->ecc & F81604_SJA1000_ECC_MASK) {
+ case F81604_SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case F81604_SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case F81604_SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ break;
+ }
+
+ /* set error location */
+ cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG;
+ }
+
+ /* Error occurred during transmission? */
+ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) {
+ stats->tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ } else {
+ stats->rx_errors++;
+ }
+
+ set_bit(F81604_CLEAR_ECC, &priv->clear_flags);
+ }
+
+ if (data->isrc & F81604_SJA1000_IRQ_EPI) {
+ if (can_state == CAN_STATE_ERROR_PASSIVE)
+ can_state = CAN_STATE_ERROR_WARNING;
+ else
+ can_state = CAN_STATE_ERROR_PASSIVE;
+
+ /* error passive interrupt */
+ netdev_dbg(netdev, "error passive interrupt: %d\n", can_state);
+ }
+
+ if (data->isrc & F81604_SJA1000_IRQ_ALI) {
+ /* arbitration lost interrupt */
+ netdev_dbg(netdev, "arbitration lost interrupt\n");
+
+ priv->can.can_stats.arbitration_lost++;
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = data->alc & F81604_SJA1000_ALC_MASK;
+ }
+
+ set_bit(F81604_CLEAR_ALC, &priv->clear_flags);
+ }
+
+ if (can_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = data->txerr >= data->rxerr ? can_state : 0;
+ rx_state = data->txerr <= data->rxerr ? can_state : 0;
+
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (can_state == CAN_STATE_BUS_OFF)
+ can_bus_off(netdev);
+ }
+
+ if (priv->clear_flags)
+ schedule_work(&priv->clear_reg_work);
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void f81604_read_int_callback(struct urb *urb)
+{
+ struct f81604_int_data *data = urb->transfer_buffer;
+ struct net_device *netdev = urb->context;
+ struct f81604_port_priv *priv;
+ int ret;
+
+ priv = netdev_priv(netdev);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "%s: Int URB aborted: %pe\n", __func__,
+ ERR_PTR(urb->status));
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -EPIPE:
+ case -EPROTO:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ goto resubmit_urb;
+ }
+
+ /* handle errors */
+ if (data->isrc & (F81604_SJA1000_IRQ_DOI | F81604_SJA1000_IRQ_EI |
+ F81604_SJA1000_IRQ_BEI | F81604_SJA1000_IRQ_EPI |
+ F81604_SJA1000_IRQ_ALI))
+ f81604_handle_can_bus_errors(priv, data);
+
+ /* handle TX */
+ if (priv->can.state != CAN_STATE_BUS_OFF &&
+ (data->isrc & F81604_SJA1000_IRQ_TI))
+ f81604_handle_tx(priv, data);
+
+resubmit_urb:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ else if (ret)
+ netdev_err(netdev, "%s: failed to resubmit int urb: %pe\n",
+ __func__, ERR_PTR(ret));
+}
+
+static void f81604_unregister_urbs(struct f81604_port_priv *priv)
+{
+ usb_kill_anchored_urbs(&priv->urbs_anchor);
+}
+
+static int f81604_register_urbs(struct f81604_port_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct f81604_int_data *int_data;
+ int id = netdev->dev_port;
+ struct urb *int_urb;
+ int rx_urb_cnt;
+ int ret;
+
+ for (rx_urb_cnt = 0; rx_urb_cnt < F81604_MAX_RX_URBS; ++rx_urb_cnt) {
+ struct f81604_can_frame *frame;
+ struct urb *rx_urb;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ frame = kmalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ usb_free_urb(rx_urb);
+ ret = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(rx_urb, priv->dev,
+ usb_rcvbulkpipe(priv->dev, bulk_in_addr[id]),
+ frame, sizeof(*frame),
+ f81604_read_bulk_callback, netdev);
+
+ rx_urb->transfer_flags |= URB_FREE_BUFFER;
+ usb_anchor_urb(rx_urb, &priv->urbs_anchor);
+
+ ret = usb_submit_urb(rx_urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(rx_urb);
+ usb_free_urb(rx_urb);
+ break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(rx_urb);
+ }
+
+ if (rx_urb_cnt == 0) {
+ netdev_warn(netdev, "%s: submit rx urb failed: %pe\n",
+ __func__, ERR_PTR(ret));
+
+ goto error;
+ }
+
+ int_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!int_urb) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ int_data = kmalloc(sizeof(*int_data), GFP_KERNEL);
+ if (!int_data) {
+ usb_free_urb(int_urb);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ usb_fill_int_urb(int_urb, priv->dev,
+ usb_rcvintpipe(priv->dev, int_in_addr[id]), int_data,
+ sizeof(*int_data), f81604_read_int_callback, netdev,
+ 1);
+
+ int_urb->transfer_flags |= URB_FREE_BUFFER;
+ usb_anchor_urb(int_urb, &priv->urbs_anchor);
+
+ ret = usb_submit_urb(int_urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(int_urb);
+ usb_free_urb(int_urb);
+
+ netdev_warn(netdev, "%s: submit int urb failed: %pe\n",
+ __func__, ERR_PTR(ret));
+ goto error;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(int_urb);
+
+ return 0;
+
+error:
+ f81604_unregister_urbs(priv);
+ return ret;
+}
+
+static int f81604_start(struct net_device *netdev)
+{
+ struct f81604_port_priv *priv = netdev_priv(netdev);
+ int ret;
+ u8 mode;
+ u8 tmp;
+
+ mode = F81604_RX_AUTO_RELEASE_BUF | F81604_INT_WHEN_CHANGE;
+
+ /* Set TR/AT mode */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ mode |= F81604_TX_ONESHOT;
+ else
+ mode |= F81604_TX_NORMAL;
+
+ ret = f81604_sja1000_write(priv, F81604_CTRL_MODE_REG, mode);
+ if (ret)
+ return ret;
+
+ /* set reset mode */
+ ret = f81604_set_reset_mode(priv);
+ if (ret)
+ return ret;
+
+ ret = f81604_chipset_init(priv);
+ if (ret)
+ return ret;
+
+ /* Clear error counters and error code capture */
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_TXERR, 0);
+ if (ret)
+ return ret;
+
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_RXERR, 0);
+ if (ret)
+ return ret;
+
+ /* Read clear for ECC/ALC/IR register */
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp);
+ if (ret)
+ return ret;
+
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp);
+ if (ret)
+ return ret;
+
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_IR, &tmp);
+ if (ret)
+ return ret;
+
+ ret = f81604_register_urbs(priv);
+ if (ret)
+ return ret;
+
+ ret = f81604_set_normal_mode(priv);
+ if (ret) {
+ f81604_unregister_urbs(priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int f81604_set_bittiming(struct net_device *dev)
+{
+ struct f81604_port_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+ int ret;
+
+ btr0 = FIELD_PREP(F81604_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(F81604_SJW_MASK, bt->sjw - 1);
+
+ btr1 = FIELD_PREP(F81604_SEG1_MASK,
+ bt->prop_seg + bt->phase_seg1 - 1) |
+ FIELD_PREP(F81604_SEG2_MASK, bt->phase_seg2 - 1);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= F81604_SJA1000_BTR1_SAMPLE_TRIPLE;
+
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR0, btr0);
+ if (ret) {
+ netdev_warn(dev, "%s: Set BTR0 failed: %pe\n", __func__,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR1, btr1);
+ if (ret) {
+ netdev_warn(dev, "%s: Set BTR1 failed: %pe\n", __func__,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
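To make the field packing concrete, here is one illustrative 500 kbit/s configuration at the 12 MHz device clock (brp = 3, sjw = 1, prop_seg + phase_seg1 = 5, phase_seg2 = 2, i.e. 8 time quanta of 250 ns per bit); in practice the CAN core derives these values from the requested bitrate and f81604_bittiming_const:

    btr0 = FIELD_PREP(F81604_BRP_MASK, 3 - 1) |
           FIELD_PREP(F81604_SJW_MASK, 1 - 1);   /* == 0x02 */
    btr1 = FIELD_PREP(F81604_SEG1_MASK, 5 - 1) |
           FIELD_PREP(F81604_SEG2_MASK, 2 - 1);  /* == 0x14 */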
+
+static int f81604_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = f81604_start(netdev);
+ if (!ret && netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void f81604_write_bulk_callback(struct urb *urb)
+{
+ struct net_device *netdev = urb->context;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "%s: Tx URB error: %pe\n", __func__,
+ ERR_PTR(urb->status));
+}
+
+static void f81604_clear_reg_work(struct work_struct *work)
+{
+ struct f81604_port_priv *priv;
+ u8 tmp;
+
+ priv = container_of(work, struct f81604_port_priv, clear_reg_work);
+
+ /* Dummy read to clear the arbitration lost capture (ALC) register. */
+ if (test_and_clear_bit(F81604_CLEAR_ALC, &priv->clear_flags))
+ f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp);
+
+ /* Dummy read to clear the error code capture (ECC) register. */
+ if (test_and_clear_bit(F81604_CLEAR_ECC, &priv->clear_flags))
+ f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp);
+
+ /* Dummy write to clear the data overrun flag. */
+ if (test_and_clear_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags))
+ f81604_sja1000_write(priv, F81604_SJA1000_CMR,
+ F81604_SJA1000_CMD_CDO);
+}
+
+static netdev_tx_t f81604_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct f81604_port_priv *priv = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct f81604_can_frame *frame;
+ struct urb *write_urb;
+ int ret;
+
+ if (can_dev_dropped_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(netdev);
+
+ write_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!write_urb)
+ goto nomem_urb;
+
+ frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
+ if (!frame)
+ goto nomem_buf;
+
+ usb_fill_bulk_urb(write_urb, priv->dev,
+ usb_sndbulkpipe(priv->dev,
+ bulk_out_addr[netdev->dev_port]),
+ frame, sizeof(*frame), f81604_write_bulk_callback,
+ priv->netdev);
+
+ write_urb->transfer_flags |= URB_FREE_BUFFER;
+
+ frame->cmd = F81604_CMD_DATA;
+ frame->dlc = cf->len;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ frame->dlc |= F81604_DLC_RTR_BIT;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ u32 id = (cf->can_id & CAN_EFF_MASK) << F81604_EFF_SHIFT;
+
+ put_unaligned_be32(id, &frame->eff.id);
+
+ frame->dlc |= F81604_DLC_EFF_BIT;
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ memcpy(&frame->eff.data, cf->data, cf->len);
+ } else {
+ u32 id = (cf->can_id & CAN_SFF_MASK) << F81604_SFF_SHIFT;
+
+ put_unaligned_be16(id, &frame->sff.id);
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ memcpy(&frame->sff.data, cf->data, cf->len);
+ }
+
+ can_put_echo_skb(skb, netdev, 0, 0);
+
+ ret = usb_submit_urb(write_urb, GFP_ATOMIC);
+ if (ret) {
+ netdev_err(netdev, "%s: failed to resubmit tx bulk urb: %pe\n",
+ __func__, ERR_PTR(ret));
+
+ can_free_echo_skb(netdev, 0, NULL);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netif_wake_queue(netdev);
+ }
+
+ /* let usb core take care of this urb */
+ usb_free_urb(write_urb);
+
+ return NETDEV_TX_OK;
+
+nomem_buf:
+ usb_free_urb(write_urb);
+
+nomem_urb:
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+ netif_wake_queue(netdev);
+
+ return NETDEV_TX_OK;
+}
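The xmit path follows the usual USB buffer-ownership handoff: URB_FREE_BUFFER tells the core to free the frame buffer together with the URB, and the usb_free_urb() after submission only drops this function's reference, so the URB stays alive until its completion callback has run. A condensed sketch of the pattern:

    urb = usb_alloc_urb(0, GFP_ATOMIC);        /* our reference */
    buf = kzalloc(len, GFP_ATOMIC);
    usb_fill_bulk_urb(urb, udev, pipe, buf, len, done_cb, ctx);
    urb->transfer_flags |= URB_FREE_BUFFER;    /* core frees buf */
    ret = usb_submit_urb(urb, GFP_ATOMIC);     /* core takes a ref */
    usb_free_urb(urb);                         /* drop ours */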
+
+static int f81604_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct f81604_port_priv *priv = netdev_priv(netdev);
+ u8 txerr, rxerr;
+ int ret;
+
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_TXERR, &txerr);
+ if (ret)
+ return ret;
+
+ ret = f81604_sja1000_read(priv, F81604_SJA1000_RXERR, &rxerr);
+ if (ret)
+ return ret;
+
+ bec->txerr = txerr;
+ bec->rxerr = rxerr;
+
+ return 0;
+}
+
+/* Open USB device */
+static int f81604_open(struct net_device *netdev)
+{
+ int ret;
+
+ ret = open_candev(netdev);
+ if (ret)
+ return ret;
+
+ ret = f81604_start(netdev);
+ if (ret) {
+ if (ret == -ENODEV)
+ netif_device_detach(netdev);
+
+ close_candev(netdev);
+ return ret;
+ }
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+/* Close USB device */
+static int f81604_close(struct net_device *netdev)
+{
+ struct f81604_port_priv *priv = netdev_priv(netdev);
+
+ f81604_set_reset_mode(priv);
+
+ netif_stop_queue(netdev);
+ cancel_work_sync(&priv->clear_reg_work);
+ close_candev(netdev);
+
+ f81604_unregister_urbs(priv);
+
+ return 0;
+}
+
+static const struct net_device_ops f81604_netdev_ops = {
+ .ndo_open = f81604_open,
+ .ndo_stop = f81604_close,
+ .ndo_start_xmit = f81604_start_xmit,
+};
+
+static const struct can_bittiming_const f81604_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+/* Called by the usb core when driver is unloaded or device is removed */
+static void f81604_disconnect(struct usb_interface *intf)
+{
+ struct f81604_priv *priv = usb_get_intfdata(intf);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) {
+ if (!priv->netdev[i])
+ continue;
+
+ unregister_netdev(priv->netdev[i]);
+ free_candev(priv->netdev[i]);
+ }
+}
+
+static int __f81604_set_termination(struct usb_device *dev, int idx, u16 term)
+{
+ u8 mask, data = 0;
+
+ if (idx == 0)
+ mask = F81604_CAN0_TERM;
+ else
+ mask = F81604_CAN1_TERM;
+
+ if (term)
+ data = mask;
+
+ return f81604_update_bits(dev, F81604_TERMINATOR_REG, mask, data);
+}
+
+static int f81604_set_termination(struct net_device *netdev, u16 term)
+{
+ struct f81604_port_priv *port_priv = netdev_priv(netdev);
+
+ ASSERT_RTNL();
+
+ return __f81604_set_termination(port_priv->dev, netdev->dev_port,
+ term);
+}
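With do_set_termination wired up, the on-board 120 Ohm terminator can be toggled from user space with standard iproute2 syntax:

    $ ip link set can0 type can termination 120   # enable
    $ ip link set can0 type can termination 0     # disable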
+
+static int f81604_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct net_device *netdev;
+ struct f81604_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(&intf->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ usb_set_intfdata(intf, priv);
+
+ for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) {
+ ret = __f81604_set_termination(dev, i, 0);
+ if (ret) {
+ dev_err(&intf->dev,
+ "Setting termination of CH#%d failed: %pe\n",
+ i, ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) {
+ struct f81604_port_priv *port_priv;
+
+ netdev = alloc_candev(sizeof(*port_priv), 1);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't alloc candev: %d\n", i);
+ ret = -ENOMEM;
+
+ goto failure_cleanup;
+ }
+
+ port_priv = netdev_priv(netdev);
+
+ INIT_WORK(&port_priv->clear_reg_work, f81604_clear_reg_work);
+ init_usb_anchor(&port_priv->urbs_anchor);
+
+ port_priv->intf = intf;
+ port_priv->dev = dev;
+ port_priv->netdev = netdev;
+ port_priv->can.clock.freq = F81604_CAN_CLOCK;
+
+ port_priv->can.termination_const = f81604_termination;
+ port_priv->can.termination_const_cnt =
+ ARRAY_SIZE(f81604_termination);
+ port_priv->can.bittiming_const = &f81604_bittiming_const;
+ port_priv->can.do_set_bittiming = f81604_set_bittiming;
+ port_priv->can.do_set_mode = f81604_set_mode;
+ port_priv->can.do_set_termination = f81604_set_termination;
+ port_priv->can.do_get_berr_counter = f81604_get_berr_counter;
+ port_priv->can.ctrlmode_supported =
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_PRESUME_ACK;
+
+ netdev->ethtool_ops = &f81604_ethtool_ops;
+ netdev->netdev_ops = &f81604_netdev_ops;
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ ret = register_candev(netdev);
+ if (ret) {
+ netdev_err(netdev, "register CAN device failed: %pe\n",
+ ERR_PTR(ret));
+ free_candev(netdev);
+
+ goto failure_cleanup;
+ }
+
+ priv->netdev[i] = netdev;
+ }
+
+ return 0;
+
+failure_cleanup:
+ f81604_disconnect(intf);
+ return ret;
+}
+
+static struct usb_driver f81604_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = f81604_probe,
+ .disconnect = f81604_disconnect,
+ .id_table = f81604_table,
+};
+
+module_usb_driver(f81604_driver);
+
+MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw>");
+MODULE_DESCRIPTION("Fintek F81604 USB-to-2CAN controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1b400de00f51..e29e85b67fd4 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -5,30 +5,54 @@
* Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
* Copyright (C) 2016 Hubert Denkmair
+ * Copyright (c) 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
*
* Many thanks to all socketcan devs!
*/
+#include <linux/bitfield.h>
+#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/timecounter.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
-#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
+#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+
+#define USB_CANNECTIVITY_VENDOR_ID 0x1209
+#define USB_CANNECTIVITY_PRODUCT_ID 0xca01
+
+/* The 32 bit timestamp timer runs at 1 MHz (1 µs tick). A worker
+ * accounts for timer overflow (which occurs after ~71 minutes).
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
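The static_assert encodes straightforward arithmetic: the 32 bit, 1 MHz counter wraps after

    2^32 µs / 10^6 = 4294.97 s ≈ 71.6 min    (full wrap)
    half period    = 2147.48 s ≈ 35.8 min
    worker delay   = 1800 s    = 30 min      < 35.8 min

so reading the timecounter every 1800 s keeps it well within the required half period.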
/* Device specific constants */
enum gs_usb_breq {
@@ -40,6 +64,14 @@ enum gs_usb_breq {
GS_USB_BREQ_DEVICE_CONFIG,
GS_USB_BREQ_TIMESTAMP,
GS_USB_BREQ_IDENTIFY,
+ GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_SET_USER_ID,
+ GS_USB_BREQ_DATA_BITTIMING,
+ GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
+ GS_USB_BREQ_GET_STATE,
};
enum gs_can_mode {
@@ -63,6 +95,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -87,11 +127,21 @@ struct gs_device_config {
__le32 hw_version;
} __packed;
-#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
-#define GS_CAN_MODE_LOOP_BACK BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
+#define GS_CAN_MODE_BERR_REPORTING BIT(12)
+/* GS_CAN_FEATURE_GET_STATE BIT(13) */
struct gs_device_mode {
__le32 mode;
@@ -116,12 +166,32 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_BERR_REPORTING BIT(12)
+#define GS_CAN_FEATURE_GET_STATE BIT(13)
+#define GS_CAN_FEATURE_MASK GENMASK(13, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
struct gs_device_bt_const {
__le32 feature;
@@ -136,18 +206,85 @@ struct gs_device_bt_const {
__le32 brp_inc;
} __packed;
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
+
+ __le32 dtseg1_min;
+ __le32 dtseg1_max;
+ __le32 dtseg2_min;
+ __le32 dtseg2_max;
+ __le32 dsjw_max;
+ __le32 dbrp_min;
+ __le32 dbrp_max;
+ __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
-struct gs_host_frame {
- u32 echo_id;
- __le32 can_id;
+struct classic_can {
+ u8 data[8];
+} __packed;
- u8 can_dlc;
- u8 channel;
- u8 flags;
- u8 reserved;
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+struct classic_can_quirk {
u8 data[8];
+ u8 quirk;
+} __packed;
+
+struct canfd {
+ u8 data[64];
+} __packed;
+
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
+struct canfd_quirk {
+ u8 data[64];
+ u8 quirk;
+} __packed;
+
+/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
+ * a regular RX'ed CAN frame
+ */
+#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff
+
+struct gs_host_frame {
+ struct_group(header,
+ u32 echo_id;
+ __le32 can_id;
+
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+ );
+
+ union {
+ DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
+ DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+ DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
+ DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+ };
} __packed;
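Because every payload variant is a flexible array behind the struct_group()'d header, the wire size of each frame shape falls out of struct_size(); assuming the 12 byte packed header above, the values work out to:

    struct_size(hf, classic_can, 1)    == 12 + 8      == 20
    struct_size(hf, classic_can_ts, 1) == 12 + 8 + 4  == 24
    struct_size(hf, canfd, 1)          == 12 + 64     == 76
    struct_size(hf, canfd_ts, 1)       == 12 + 64 + 4 == 80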
/* The GS USB devices make use of the same flags and masks as in
* linux/can.h and linux/can/error.h, and no additional mapping is necessary.
@@ -157,10 +294,7 @@ struct gs_host_frame {
#define GS_MAX_TX_URBS 10
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
-/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
- */
-#define GS_MAX_INTF 2
+#define GS_NAPI_WEIGHT 32
struct gs_tx_context {
struct gs_can *dev;
@@ -170,15 +304,18 @@ struct gs_tx_context {
struct gs_can {
struct can_priv can; /* must be the first member */
+ struct can_rx_offload offload;
struct gs_usb *parent;
struct net_device *netdev;
struct usb_device *udev;
- struct usb_interface *iface;
- struct can_bittiming_const bt_const;
+ struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ u32 feature;
+ unsigned int hf_size_tx;
+
/* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -189,10 +326,22 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
- struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access tc->cycle_last */
+ struct delayed_work timestamp;
+
+ unsigned int hf_size_rx;
+ u8 active_channels;
+ u8 channel_cnt;
+
+ unsigned int pipe_in;
+ unsigned int pipe_out;
+ struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -242,31 +391,108 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_RESET),
+ };
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
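usb_control_msg_send() (and the _recv counterpart used below) copies through an internal heap buffer, so the message may live on the stack, and it returns 0 or a negative errno with short transfers treated as errors; that is what eliminates the old kzalloc()/kfree() pair and the return-value translation:

    /* Equivalent old-style call, for comparison (error handling elided):
     *
     *   dm = kzalloc(sizeof(*dm), GFP_KERNEL);
     *   rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), ...,
     *                        dm, sizeof(*dm), 1000);
     *   kfree(dm);
     *   return (rc > 0) ? 0 : rc;
     */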
+
+static inline int gs_usb_get_timestamp(const struct gs_usb *parent,
+ u32 *timestamp_p)
+{
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(parent->udev, 0, GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 0, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ *timestamp_p = le32_to_cpu(timestamp);
+
+ return 0;
+}
+
+static u64 gs_usb_timestamp_read(struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_usb *parent = container_of(cc, struct gs_usb, cc);
+ u32 timestamp = 0;
+ int err;
+
+ lockdep_assert_held(&parent->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&parent->tc_lock);
+ err = gs_usb_get_timestamp(parent, &timestamp);
+ spin_lock_bh(&parent->tc_lock);
+ if (err)
+ dev_err(&parent->udev->dev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
- dm->mode = GS_CAN_MODE_RESET;
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_usb *parent;
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ parent = container_of(delayed_work, struct gs_usb, timestamp);
+ spin_lock_bh(&parent->tc_lock);
+ timecounter_read(&parent->tc);
+ spin_unlock_bh(&parent->tc_lock);
- kfree(dm);
+ schedule_delayed_work(&parent->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
- return rc;
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct gs_usb *parent = dev->parent;
+ u64 ns;
+
+ spin_lock_bh(&parent->tc_lock);
+ ns = timecounter_cyc2time(&parent->tc, timestamp);
+ spin_unlock_bh(&parent->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_usb *parent)
+{
+ struct cyclecounter *cc = &parent->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&parent->tc_lock);
+ spin_lock_bh(&parent->tc_lock);
+ timecounter_init(&parent->tc, &parent->cc, ktime_get_real_ns());
+ spin_unlock_bh(&parent->tc_lock);
+
+ INIT_DELAYED_WORK(&parent->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&parent->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_usb *parent)
+{
+ cancel_delayed_work_sync(&parent->timestamp);
}
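The cyclecounter parameters reduce to a simple scale here: with the 1 MHz timer each tick is 1000 ns, and bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ) = bits_per(1000) = 10, so

    shift = 32 - 10 = 22
    mult  = clocksource_hz2mult(1000000, 22)
          = (10^9 << 22) / 10^6 = 1000 << 22
    ns    = (cycles * mult) >> shift = cycles * 1000

i.e. timecounter_cyc2time() effectively multiplies the µs counter by 1000 while retaining sub-cycle precision in the fixed-point representation.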
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
@@ -294,19 +520,107 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static u32 gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ if (skb)
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return timestamp;
+}
+
+static void gs_usb_rx_offload(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ struct can_rx_offload *offload = &dev->offload;
+ int rc;
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
+ const u32 ts = gs_usb_set_timestamp(dev, skb, hf);
+
+ rc = can_rx_offload_queue_timestamp(offload, skb, ts);
+ } else {
+ rc = can_rx_offload_queue_tail(offload, skb);
+ }
+
+ if (rc)
+ dev->netdev->stats.rx_fifo_errors++;
+}
+
+static unsigned int
+gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ struct can_rx_offload *offload = &dev->offload;
+ const u32 echo_id = hf->echo_id;
+ unsigned int len;
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
+ const u32 ts = gs_usb_set_timestamp(dev, skb, hf);
+
+ len = can_rx_offload_get_echo_skb_queue_timestamp(offload, echo_id,
+ ts, NULL);
+ } else {
+ len = can_rx_offload_get_echo_skb_queue_tail(offload, echo_id,
+ NULL);
+ }
+
+ return len;
+}
+
+static unsigned int
+gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
+ unsigned int *data_length_p)
+{
+ unsigned int minimum_length, data_length = 0;
+
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
+ data_length = can_fd_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, canfd_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ } else {
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
+ !(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
+ data_length = can_cc_dlc2len(hf->can_dlc);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ /* timestamp follows data field of max size */
+ minimum_length = struct_size(hf, classic_can_ts, 1);
+ else
+ minimum_length = sizeof(hf->header) + data_length;
+ }
+
+ *data_length_p = data_length;
+ return minimum_length;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
- struct gs_usb *usbcan = urb->context;
+ struct gs_usb *parent = urb->context;
struct gs_can *dev;
struct net_device *netdev;
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
+ unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
+ struct canfd_frame *cfd;
struct sk_buff *skb;
- BUG_ON(!usbcan);
+ BUG_ON(!parent);
switch (urb->status) {
case 0: /* success */
@@ -319,11 +633,20 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}
- /* device reports out of range channel id */
- if (hf->channel >= GS_MAX_INTF)
+ minimum_length = sizeof(hf->header);
+ if (urb->actual_length < minimum_length) {
+ dev_err_ratelimited(&parent->udev->dev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
+
goto resubmit_urb;
+ }
+
+ /* device reports out of range channel id */
+ if (hf->channel >= parent->channel_cnt)
+ goto device_detach;
- dev = usbcan->canch[hf->channel];
+ dev = parent->canch[hf->channel];
netdev = dev->netdev;
stats = &netdev->stats;
@@ -331,24 +654,52 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_device_present(netdev))
return;
- if (hf->echo_id == -1) { /* normal rx */
- skb = alloc_can_skb(dev->netdev, &cf);
- if (!skb)
- return;
+ if (!netif_running(netdev))
+ goto resubmit_urb;
- cf->can_id = le32_to_cpu(hf->can_id);
+ minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
+ if (urb->actual_length < minimum_length) {
+ stats->rx_errors++;
+ stats->rx_length_errors++;
+
+ if (net_ratelimit())
+ netdev_err(netdev,
+ "short read (actual_length=%u, minimum_length=%u)\n",
+ urb->actual_length, minimum_length);
- can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->data, 8);
+ goto resubmit_urb;
+ }
- /* ERROR frames tell us information about the controller */
- if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
- gs_update_state(dev, cf);
+ if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ skb = alloc_canfd_skb(netdev, &cfd);
+ if (!skb)
+ return;
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += hf->can_dlc;
+ cfd->can_id = le32_to_cpu(hf->can_id);
+ cfd->len = data_length;
+ if (hf->flags & GS_CAN_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (hf->flags & GS_CAN_FLAG_ESI)
+ cfd->flags |= CANFD_ESI;
- netif_rx(skb);
+ memcpy(cfd->data, hf->canfd->data, data_length);
+ } else {
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = le32_to_cpu(hf->can_id);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
+
+ memcpy(cf->data, hf->classic_can->data, data_length);
+
+ /* ERROR frames tell us information about the controller */
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+ }
+
+ gs_usb_rx_offload(dev, skb, hf);
} else { /* echo_id == hf->echo_id */
if (hf->echo_id >= GS_MAX_TX_URBS) {
netdev_err(netdev,
@@ -357,9 +708,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += hf->can_dlc;
-
txc = gs_get_tx_context(dev, hf->echo_id);
/* bad devices send bad echo_ids. */
@@ -370,8 +718,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id, NULL);
-
+ skb = dev->can.echo_skb[hf->echo_id];
+ stats->tx_packets++;
+ stats->tx_bytes += gs_usb_get_echo_skb(dev, skb, hf);
gs_free_tx_context(txc);
atomic_dec(&dev->active_tx_urbs);
@@ -380,6 +729,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
goto resubmit_urb;
@@ -387,28 +739,26 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id |= CAN_ERR_CRTL;
cf->len = CAN_ERR_DLC;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
- netif_rx(skb);
+
+ gs_usb_rx_offload(dev, skb, hf);
}
- resubmit_urb:
- usb_fill_bulk_urb(urb,
- usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
- hf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- usbcan
- );
+ can_rx_offload_irq_finish(&dev->offload);
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, parent->udev,
+ parent->pipe_in,
+ hf, parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
rc = usb_submit_urb(urb, GFP_ATOMIC);
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
- for (rc = 0; rc < GS_MAX_INTF; rc++) {
- if (usbcan->canch[rc])
- netif_device_detach(usbcan->canch[rc]->netdev);
+device_detach:
+ for (rc = 0; rc < parent->channel_cnt; rc++) {
+ if (parent->canch[rc])
+ netif_device_detach(parent->canch[rc]->netdev);
}
}
}
@@ -417,38 +767,42 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dbt,
- sizeof(*dbt),
- 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
+}
- return (rc > 0) ? 0 : rc;
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
+ u8 request = GS_USB_BREQ_DATA_BITTIMING;
+
+ if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+ request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+ /* request data bit timings */
+ return usb_control_msg_send(dev->udev, 0, request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -457,13 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;
- if (urb->status)
- netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
+ if (!urb->status)
+ return;
+
+ if (urb->status != -ESHUTDOWN && net_ratelimit())
+ netdev_info(netdev, "failed to xmit URB %u: %pe\n",
+ txc->echo_id, ERR_PTR(urb->status));
+
+ netdev->stats.tx_dropped++;
+ netdev->stats.tx_errors++;
+
+ can_free_echo_skb(netdev, txc->echo_id, NULL);
+ gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,11 +836,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
struct urb *urb;
struct gs_host_frame *hf;
struct can_frame *cf;
+ struct canfd_frame *cfd;
int rc;
unsigned int idx;
struct gs_tx_context *txc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* find an empty context to keep track of transmission */
@@ -491,12 +854,9 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
- &urb->transfer_dma);
- if (!hf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
+ if (!hf)
goto nomem_hf;
- }
idx = txc->echo_id;
@@ -507,22 +867,36 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->echo_id = idx;
hf->channel = dev->channel;
+ hf->flags = 0;
+ hf->reserved = 0;
+
- cf = (struct can_frame *)skb->data;
- hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
- memcpy(hf->data, cf->data, cf->len);
+ if (can_is_canfd_skb(skb)) {
+ cfd = (struct canfd_frame *)skb->data;
+ hf->can_id = cpu_to_le32(cfd->can_id);
+ hf->can_dlc = can_fd_len2dlc(cfd->len);
+ hf->flags |= GS_CAN_FLAG_FD;
+ if (cfd->flags & CANFD_BRS)
+ hf->flags |= GS_CAN_FLAG_BRS;
+ if (cfd->flags & CANFD_ESI)
+ hf->flags |= GS_CAN_FLAG_ESI;
+
+ memcpy(hf->canfd->data, cfd->data, cfd->len);
+ } else {
+ cf = (struct can_frame *)skb->data;
+ hf->can_id = cpu_to_le32(cf->can_id);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->classic_can->data, cf->data, cf->len);
+ }
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
- hf,
- sizeof(*hf),
- gs_usb_xmit_callback,
- txc);
+ dev->parent->pipe_out,
+ hf, dev->hf_size_tx,
+ gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
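+ /* let the USB core free the hf buffer together with the URB */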
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
@@ -537,10 +911,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -559,15 +929,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
- badidx:
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
- nomem_hf:
+badidx:
+ kfree(hf);
+nomem_hf:
usb_free_urb(urb);
- nomem_urb:
+nomem_urb:
gs_free_tx_context(txc);
dev_kfree_skb(skb);
stats->tx_dropped++;
@@ -578,47 +945,64 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
+ struct gs_host_frame *hf;
+ struct urb *urb = NULL;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ ctrlmode = dev->can.ctrlmode;
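+ /* pick the TX host frame size: CAN FD frames need the larger
+ * payload, and devices with the LPC546xx USB quirk use the
+ * padded *_quirk layouts
+ */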
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, classic_can, 1);
+ }
+
+ can_rx_offload_enable(&dev->offload);
+
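+ /* gs_can_open()/gs_can_close() are serialized by the rtnl lock,
+ * so plain (non-atomic) accesses to active_channels are safe
+ */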
+ if (!parent->active_channels) {
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_init(parent);
+
for (i = 0; i < GS_MAX_RX_URBS; i++) {
- struct urb *urb;
u8 *buf;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- return -ENOMEM;
+ if (!urb) {
+ rc = -ENOMEM;
+ goto out_usb_kill_anchored_urbs;
+ }
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- GFP_KERNEL,
- &urb->transfer_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
- netdev_err(netdev,
- "No memory left for USB buffer\n");
- usb_free_urb(urb);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_usb_free_urb;
}
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
- usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ dev->parent->pipe_in,
buf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -628,12 +1012,10 @@ static int gs_can_open(struct net_device *netdev)
netif_device_detach(dev->netdev);
netdev_err(netdev,
- "usb_submit failed (err=%d)\n",
- rc);
+ "usb_submit_urb() failed, error %pe\n",
+ ERR_PTR(rc));
- usb_unanchor_urb(urb);
- usb_free_urb(urb);
- break;
+ goto out_usb_unanchor_urb;
}
/* Drop reference,
@@ -643,55 +1025,100 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
- ctrlmode = dev->can.ctrlmode;
-
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
- else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= GS_CAN_MODE_LISTEN_ONLY;
- /* Controller is not allowed to retry TX
- * this mode is unavailable on atmels uc3c hardware
- */
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
flags |= GS_CAN_MODE_ONE_SHOT;
- if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ flags |= GS_CAN_MODE_BERR_REPORTING;
- /* finally start device */
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
-
- if (rc < 0) {
- netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
- return rc;
- }
+ if (ctrlmode & CAN_CTRLMODE_FD)
+ flags |= GS_CAN_MODE_FD;
- kfree(dm);
+ /* if hardware supports timestamps, enable it */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+ /* finally start device */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ dev->can.state = CAN_STATE_STOPPED;
+ goto out_usb_kill_anchored_urbs;
+ }
+
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
return 0;
+
+out_usb_unanchor_urb:
+ usb_unanchor_urb(urb);
+out_usb_free_urb:
+ usb_free_urb(urb);
+out_usb_kill_anchored_urbs:
+ if (!parent->active_channels) {
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(parent);
+ }
+
+ can_rx_offload_disable(&dev->offload);
+ close_candev(netdev);
+
+ return rc;
+}
+
+static int gs_usb_get_state(const struct net_device *netdev,
+ struct can_berr_counter *bec,
+ enum can_state *state)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_state ds;
+ int rc;
+
+ rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_STATE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &ds, sizeof(ds),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
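+ /* the device reports its state as a raw u32; reject values
+ * outside of enum can_state
+ */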
+ if (le32_to_cpu(ds.state) >= CAN_STATE_MAX)
+ return -EOPNOTSUPP;
+
+ *state = le32_to_cpu(ds.state);
+ bec->txerr = le32_to_cpu(ds.txerr);
+ bec->rxerr = le32_to_cpu(ds.rxerr);
+
+ return 0;
+}
+
+static int gs_usb_can_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ enum can_state state;
+
+ return gs_usb_get_state(netdev, bec, &state);
}
static int gs_can_close(struct net_device *netdev)
@@ -703,17 +1130,22 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+ if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(parent);
+ }
+
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
+ dev->can.state = CAN_STATE_STOPPED;
+
/* reset the device */
- rc = gs_cmd_reset(dev);
- if (rc < 0)
- netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
+ gs_cmd_reset(dev);
/* reset tx contexts */
for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
@@ -721,64 +1153,77 @@ static int gs_can_close(struct net_device *netdev)
dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
}
+ can_rx_offload_disable(&dev->offload);
+
/* close the netdev */
close_candev(netdev);
return 0;
}
+static int gs_can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_hwtstamp_get(netdev, cfg);
+
+ return -EOPNOTSUPP;
+}
+
+static int gs_can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_hwtstamp_set(netdev, cfg, extack);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = gs_can_hwtstamp_get,
+ .ndo_hwtstamp_set = gs_can_hwtstamp_set,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
-
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface),
- 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- imode,
- sizeof(*imode),
- 100);
-
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LEDs for finding this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -787,8 +1232,65 @@ static int gs_usb_set_phys_id(struct net_device *dev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+ /* report if device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
};
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -798,29 +1300,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel,
- 0,
- bt_const,
- sizeof(*bt_const),
- 1000);
-
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
+
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -828,29 +1322,30 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
dev = netdev_priv(netdev);
netdev->netdev_ops = &gs_usb_netdev_ops;
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+ netdev->dev_id = channel;
+ netdev->dev_port = channel;
/* dev setup */
- strcpy(dev->bt_const.name, "gs_usb");
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
- dev->iface = intf;
dev->netdev = netdev;
dev->channel = channel;
@@ -864,13 +1359,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
+ dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -883,168 +1379,274 @@ static struct gs_can *gs_make_candev(unsigned int channel,
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- SET_NETDEV_DEV(netdev, &intf->dev);
+ if (feature & GS_CAN_FEATURE_FD) {
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ /* The data bit timing will be overwritten if
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+ dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
+
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
+
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
-
- kfree(bt_const);
+ if (feature & GS_CAN_FEATURE_BERR_REPORTING)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+
+ if (feature & GS_CAN_FEATURE_GET_STATE)
+ dev->can.do_get_berr_counter = gs_usb_can_get_berr_counter;
+
+ /* The CANtact Pro from LinkLayer Labs is based on the
+ * LPC54616 µC, which is affected by the NXP LPC USB transfer
+ * erratum. However, the current firmware (version 2) doesn't
+ * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+ * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround
+ * this issue.
+ *
+ * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+ * CANtact Pro firmware uses a request value, which is already
+ * used by the candleLight firmware for a different purpose
+ * (GS_USB_BREQ_GET_USER_ID). Set the feature
+ * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
+ * issue.
+ */
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
+ dev->udev->manufacturer && dev->udev->product &&
+ !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+ !strcmp(dev->udev->product, "CANtact Pro") &&
+ (le32_to_cpu(dconf->sw_version) <= 2))
+ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+ GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
+
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
+
+ /* fetch extended bit timing constants if device has feature
+ * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+ */
+ if (feature & GS_CAN_FEATURE_FD &&
+ feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev,
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
+ }
+ strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
+
+ dev->can.fd.data_bittiming_const = &dev->data_bt_const;
+ }
+
+ can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
+ SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_can_rx_offload_del;
}
return dev;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&dev->offload);
+out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
{
unregister_candev(dev->netdev);
- usb_kill_anchored_urbs(&dev->tx_submitted);
+ can_rx_offload_del(&dev->offload);
free_candev(dev->netdev);
}
static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct gs_host_frame *hf;
+ struct gs_usb *parent;
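+ /* the 0x0000beef magic lets the device detect the host's byte order */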
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
+ int rc;
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ rc = usb_find_common_endpoints(intf->cur_altsetting,
+ &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "Required endpoints not found\n");
+ return rc;
+ }
/* send host config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
- 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
- dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
- rc);
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
- if (icount > GS_MAX_INTF) {
+ if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
- GS_MAX_INTF);
- kfree(dconf);
+ type_max(parent->channel_cnt));
return -EINVAL;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
+ if (!parent)
return -ENOMEM;
- }
- init_usb_anchor(&dev->rx_submitted);
+ parent->channel_cnt = icount;
- atomic_set(&dev->active_channels, 0);
+ init_usb_anchor(&parent->rx_submitted);
- usb_set_intfdata(intf, dev);
- dev->udev = interface_to_usbdev(intf);
+ usb_set_intfdata(intf, parent);
+ parent->udev = udev;
+
+ /* store the detected endpoints */
+ parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress);
+ parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress);
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
- if (IS_ERR_OR_NULL(dev->canch[i])) {
+ unsigned int hf_size_rx = 0;
+
+ parent->canch[i] = gs_make_candev(i, intf, &dconf);
+ if (IS_ERR_OR_NULL(parent->canch[i])) {
/* save error code to return later */
- rc = PTR_ERR(dev->canch[i]);
+ rc = PTR_ERR(parent->canch[i]);
/* on failure destroy previously created candevs */
icount = i;
for (i = 0; i < icount; i++)
- gs_destroy_candev(dev->canch[i]);
+ gs_destroy_candev(parent->canch[i]);
- usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
- kfree(dev);
+ usb_kill_anchored_urbs(&parent->rx_submitted);
+ kfree(parent);
return rc;
}
- dev->canch[i]->parent = dev;
+ parent->canch[i]->parent = parent;
+
+ /* set the RX packet size based on FD mode and on whether
+ * hardware timestamps are supported
+ */
+ if (parent->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ parent->hf_size_rx = max(parent->hf_size_rx, hf_size_rx);
}
- kfree(dconf);
-
return 0;
}
static void gs_usb_disconnect(struct usb_interface *intf)
{
- unsigned i;
- struct gs_usb *dev = usb_get_intfdata(intf);
+ struct gs_usb *parent = usb_get_intfdata(intf);
+ unsigned int i;
+
usb_set_intfdata(intf, NULL);
- if (!dev) {
+ if (!parent) {
dev_err(&intf->dev, "Disconnect (nodata)\n");
return;
}
- for (i = 0; i < GS_MAX_INTF; i++)
- if (dev->canch[i])
- gs_destroy_candev(dev->canch[i]);
+ for (i = 0; i < parent->channel_cnt; i++)
+ if (parent->canch[i])
+ gs_destroy_candev(parent->canch[i]);
- usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dev);
+ kfree(parent);
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+ USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+ USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
+ USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID,
+ USB_CANNECTIVITY_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
- .probe = gs_usb_probe,
+ .name = KBUILD_MODNAME,
+ .probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
- .id_table = gs_usb_table,
+ .id_table = gs_usb_table,
};
module_usb_driver(gs_usb_driver);
diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile
index cf260044f0b9..41b4a11555aa 100644
--- a/drivers/net/can/usb/kvaser_usb/Makefile
+++ b/drivers/net/can/usb/kvaser_usb/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
-kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o
+kvaser_usb-y = kvaser_usb_core.o kvaser_usb_devlink.o kvaser_usb_leaf.o kvaser_usb_hydra.o
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 390b6bde883c..46a1b6907a50 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -22,9 +22,12 @@
*/
#include <linux/completion.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <net/devlink.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -35,15 +38,20 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
#define KVASER_USB_CAP_EXT_CAP 0x02
#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04
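+/* the firmware version word packs major[31:24], minor[23:16], build[15:0] */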
+#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16)
+#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0)
+
struct kvaser_usb_dev_cfg;
enum kvaser_usb_leaf_family {
@@ -51,6 +59,11 @@ enum kvaser_usb_leaf_family {
KVASER_USBCAN,
};
+enum kvaser_usb_led_state {
+ KVASER_USB_LED_ON = 0,
+ KVASER_USB_LED_OFF = 1,
+};
+
#define KVASER_USB_HYDRA_MAX_CMD_LEN 128
struct kvaser_usb_dev_card_data_hydra {
u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES];
@@ -65,37 +78,49 @@ struct kvaser_usb_dev_card_data_hydra {
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
+ u32 usbcan_timestamp_msb;
};
/* Context for an outstanding, not yet ACKed, transmission */
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
- int dlc;
};
+struct kvaser_usb_fw_version {
+ u8 major;
+ u8 minor;
+ u16 build;
+};
+
+struct kvaser_usb_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+} __packed;
+
struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
+ u32 ean[2];
+ u32 serial_number;
+ struct kvaser_usb_fw_version fw_version;
+ u8 hw_revision;
+ unsigned int nchannels;
/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
- u32 fw_version;
- unsigned int nchannels;
unsigned int max_tx_urbs;
struct kvaser_usb_dev_card_data card_data;
@@ -106,15 +131,22 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct can_berr_counter bec;
+ /* subdriver-specific data */
+ void *sub_priv;
+
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct completion start_comp, stop_comp, flush_comp;
+ struct completion start_comp, stop_comp, flush_comp,
+ get_busparams_comp;
struct usb_anchor tx_submitted;
+ struct kvaser_usb_busparams busparams_nominal, busparams_data;
+
spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[];
@@ -124,15 +156,20 @@ struct kvaser_usb_net_priv {
* struct kvaser_usb_dev_ops - Device specific functions
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
- * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_busparams: readback arbitration busparams
+ * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming
+ * @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
* @dev_setup_endpoints: setup USB in and out endpoints
* @dev_init_card: initialize card
+ * @dev_init_channel: initialize channel
+ * @dev_remove_channel: uninitialize channel
* @dev_get_software_info: get software info
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
* @dev_get_capabilities: discover device capabilities
+ * @dev_set_led: turn on/off device LED
*
 * @dev_set_opt_mode: set ctrlmode
* @dev_start_chip: start the CAN controller
@@ -144,16 +181,25 @@ struct kvaser_usb_net_priv {
*/
struct kvaser_usb_dev_ops {
int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
- int (*dev_set_bittiming)(struct net_device *netdev);
- int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_set_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_set_data_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
int (*dev_get_berr_counter)(const struct net_device *netdev,
struct can_berr_counter *bec);
int (*dev_setup_endpoints)(struct kvaser_usb *dev);
int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
+ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
int (*dev_get_software_info)(struct kvaser_usb *dev);
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
int (*dev_get_capabilities)(struct kvaser_usb *dev);
+ int (*dev_set_led)(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms);
int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
@@ -162,8 +208,14 @@ struct kvaser_usb_dev_ops {
void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
int len);
void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid);
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid);
+};
+
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
};
struct kvaser_usb_dev_cfg {
@@ -176,6 +228,13 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+extern const struct devlink_ops kvaser_usb_devlink_ops;
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv);
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv);
+
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
@@ -185,4 +244,29 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
+static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ u64 ticks)
+{
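+ /* ticks / timestamp_freq (in MHz) gives microseconds; the
+ * factor of 1000 converts the result to nanoseconds
+ */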
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ const __le16 *timestamp)
+{
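+ /* assemble the 48-bit tick counter from three little-endian
+ * 16-bit words, least significant word first
+ */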
+ u64 ticks = le16_to_cpu(timestamp[0]) |
+ (u64)(le16_to_cpu(timestamp[1])) << 16 |
+ (u64)(le16_to_cpu(timestamp[2])) << 32;
+
+ return kvaser_usb_ticks_to_ktime(cfg, ticks);
+}
+
+static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ __le64 timestamp)
+{
+ return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
+}
+
#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 0cc0fc866a2a..62701ec34272 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/kernel.h>
@@ -30,187 +31,233 @@
#include "kvaser_usb.h"
/* Kvaser USB vendor id. */
-#define KVASER_VENDOR_ID 0x0bfd
+#define KVASER_VENDOR_ID 0x0bfd
/* Kvaser Leaf USB devices product ids */
-#define USB_LEAF_DEVEL_PRODUCT_ID 10
-#define USB_LEAF_LITE_PRODUCT_ID 11
-#define USB_LEAF_PRO_PRODUCT_ID 12
-#define USB_LEAF_SPRO_PRODUCT_ID 14
-#define USB_LEAF_PRO_LS_PRODUCT_ID 15
-#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
-#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
-#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
-#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
-#define USB_MEMO2_DEVEL_PRODUCT_ID 22
-#define USB_MEMO2_HSHS_PRODUCT_ID 23
-#define USB_UPRO_HSHS_PRODUCT_ID 24
-#define USB_LEAF_LITE_GI_PRODUCT_ID 25
-#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
-#define USB_MEMO2_HSLS_PRODUCT_ID 27
-#define USB_LEAF_LITE_CH_PRODUCT_ID 28
-#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
-#define USB_OEM_MERCURY_PRODUCT_ID 34
-#define USB_OEM_LEAF_PRODUCT_ID 35
-#define USB_CAN_R_PRODUCT_ID 39
-#define USB_LEAF_LITE_V2_PRODUCT_ID 288
-#define USB_MINI_PCIE_HS_PRODUCT_ID 289
-#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
-#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
-#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
-#define USB_USBCAN_R_V2_PRODUCT_ID 294
-#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
-#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
-#define USB_LEAF_PRODUCT_ID_END \
- USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
+#define USB_LEAF_DEVEL_PRODUCT_ID 0x000a
+#define USB_LEAF_LITE_PRODUCT_ID 0x000b
+#define USB_LEAF_PRO_PRODUCT_ID 0x000c
+#define USB_LEAF_SPRO_PRODUCT_ID 0x000e
+#define USB_LEAF_PRO_LS_PRODUCT_ID 0x000f
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 0x0010
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 0x0011
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 0x0012
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 0x0013
+#define USB_MEMO2_DEVEL_PRODUCT_ID 0x0016
+#define USB_MEMO2_HSHS_PRODUCT_ID 0x0017
+#define USB_UPRO_HSHS_PRODUCT_ID 0x0018
+#define USB_LEAF_LITE_GI_PRODUCT_ID 0x0019
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 0x001a
+#define USB_MEMO2_HSLS_PRODUCT_ID 0x001b
+#define USB_LEAF_LITE_CH_PRODUCT_ID 0x001c
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 0x001d
+#define USB_OEM_MERCURY_PRODUCT_ID 0x0022
+#define USB_OEM_LEAF_PRODUCT_ID 0x0023
+#define USB_CAN_R_PRODUCT_ID 0x0027
+#define USB_LEAF_LITE_V2_PRODUCT_ID 0x0120
+#define USB_MINI_PCIE_HS_PRODUCT_ID 0x0121
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 0x0122
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 0x0123
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 0x0124
+#define USB_USBCAN_R_V2_PRODUCT_ID 0x0126
+#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 0x0127
+#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 0x0128
/* Kvaser USBCan-II devices product ids */
-#define USB_USBCAN_REVB_PRODUCT_ID 2
-#define USB_VCI2_PRODUCT_ID 3
-#define USB_USBCAN2_PRODUCT_ID 4
-#define USB_MEMORATOR_PRODUCT_ID 5
+#define USB_USBCAN_REVB_PRODUCT_ID 0x0002
+#define USB_VCI2_PRODUCT_ID 0x0003
+#define USB_USBCAN2_PRODUCT_ID 0x0004
+#define USB_MEMORATOR_PRODUCT_ID 0x0005
/* Kvaser Minihydra USB devices product ids */
-#define USB_BLACKBIRD_V2_PRODUCT_ID 258
-#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
-#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
-#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
-#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
-#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
-#define USB_MEMO_2HS_PRODUCT_ID 265
-#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
-#define USB_HYBRID_2CANLIN_PRODUCT_ID 267
-#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
-#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
-#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 270
-#define USB_U100_PRODUCT_ID 273
-#define USB_U100P_PRODUCT_ID 274
-#define USB_U100S_PRODUCT_ID 275
-#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
-#define USB_HYBRID_CANLIN_PRODUCT_ID 277
-#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
-#define USB_HYDRA_PRODUCT_ID_END \
- USB_HYBRID_PRO_CANLIN_PRODUCT_ID
-
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+#define USB_BLACKBIRD_V2_PRODUCT_ID 0x0102
+#define USB_MEMO_PRO_5HS_PRODUCT_ID 0x0104
+#define USB_USBCAN_PRO_5HS_PRODUCT_ID 0x0105
+#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 0x0106
+#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 0x0107
+#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x0108
+#define USB_MEMO_2HS_PRODUCT_ID 0x0109
+#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010a
+#define USB_HYBRID_2CANLIN_PRODUCT_ID 0x010b
+#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x010c
+#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010d
+#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 0x010e
+#define USB_U100_PRODUCT_ID 0x0111
+#define USB_U100P_PRODUCT_ID 0x0112
+#define USB_U100S_PRODUCT_ID 0x0113
+#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
+#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+#define USB_LEAF_V3_PRODUCT_ID 0x0117
+#define USB_VINING_800_PRODUCT_ID 0x0119
+#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011a
+#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011b
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = 0,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
- int actual_len; /* Not used */
-
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
- cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+ cmd, len, NULL, KVASER_USB_TIMEOUT);
}
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
@@ -257,7 +304,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
@@ -279,8 +326,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -289,6 +334,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -305,8 +351,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -318,10 +364,13 @@ resubmit_urb:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ struct kvaser_usb_net_priv *priv;
+
+ priv = dev->nets[i];
+ if (!priv)
continue;
- netif_device_detach(dev->nets[i]->netdev);
+ netif_device_detach(priv->netdev);
}
} else if (err) {
dev_err(&dev->intf->dev,
@@ -400,21 +449,18 @@ static int kvaser_usb_open(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
if (err)
return err;
- err = kvaser_usb_setup_rx_urbs(dev);
- if (err)
- goto error;
-
- err = dev->ops->dev_set_opt_mode(priv);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -443,7 +489,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -471,22 +517,23 @@ static int kvaser_usb_close(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -499,6 +546,91 @@ static int kvaser_usb_close(struct net_device *netdev)
return 0;
}
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err;
+
+ busparams.bitrate = cpu_to_le32(bt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ busparams.nsamples = 3;
+ else
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_busparams(priv);
+ if (err) {
+ /* Treat EOPNOTSUPP as success */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+ }
+
+ if (memcmp(&busparams, &priv->busparams_nominal,
+ sizeof(priv->busparams_nominal)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
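Both bittiming setters (nominal above, CAN FD data below) marshal the generic struct can_bittiming into a device-native struct kvaser_usb_busparams, send it, and read it back for verification. The layout lives in kvaser_usb.h and is not part of this diff; a sketch, assumed from the on-wire fields this patch replaces in kvaser_cmd_set_busparams:

struct kvaser_usb_busparams {
	__le32 bitrate;
	u8 tseg1;
	u8 tseg2;
	u8 sjw;
	u8 nsamples;
} __packed;

The memcmp() round-trip then catches firmware that silently clamps or rounds any of these fields, returning -EINVAL instead of running with parameters the user did not ask for.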
+static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ if (!ops->dev_set_data_bittiming ||
+ !ops->dev_get_data_busparams)
+ return -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(dbt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_data_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_data_busparams(priv);
+ if (err)
+ return err;
+
+ if (memcmp(&busparams, &priv->busparams_data,
+ sizeof(priv->busparams_data)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
struct kvaser_usb_tx_urb_context *context = urb->context;
@@ -525,6 +657,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -534,7 +667,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
unsigned int i;
unsigned long flags;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -567,8 +700,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -624,43 +756,83 @@ freeurb:
return ret;
}
+static int kvaser_usb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ const struct kvaser_usb_dev_ops *ops = priv->dev->driver_info->ops;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 3; /* 3 On/Off cycles per second */
+
+ case ETHTOOL_ID_ON:
+ return ops->dev_set_led(priv, KVASER_USB_LED_ON, 1000);
+
+ case ETHTOOL_ID_OFF:
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1000);
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Turn LED off and restore standard function after 1ms */
+ return ops->dev_set_led(priv, KVASER_USB_LED_OFF, 1);
+
+ default:
+ return -EINVAL;
+ }
+}
+
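Usage note: `ethtool -p can0 5` first invokes ETHTOOL_ID_ACTIVE; the positive return value of 3 asks the ethtool core to drive the blinking itself, alternating ETHTOOL_ID_ON/ETHTOOL_ID_OFF three times per second for the requested 5 seconds, and a final ETHTOOL_ID_INACTIVE restores normal LED behaviour.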
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
.ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = can_hwtstamp_get,
+ .ndo_hwtstamp_set = can_hwtstamp_set,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+ .set_phys_id = kvaser_usb_set_phys_id,
};
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
+ struct kvaser_usb_net_priv *priv;
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
- unregister_candev(dev->nets[i]->netdev);
+ unregister_candev(priv->netdev);
}
kvaser_usb_unlink_all_urbs(dev);
for (i = 0; i < dev->nchannels; i++) {
- if (!dev->nets[i])
+ priv = dev->nets[i];
+ if (!priv)
continue;
- free_candev(dev->nets[i]->netdev);
+ if (ops->dev_remove_channel)
+ ops->dev_remove_channel(priv);
+
+ kvaser_usb_devlink_port_unregister(priv);
+ free_candev(priv->netdev);
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -677,7 +849,10 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
- priv->can.ctrlmode_supported = 0;
+ init_completion(&priv->flush_comp);
+ init_completion(&priv->get_busparams_comp);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
@@ -689,76 +864,88 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
- priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
+ netdev->dev_port = channel;
dev->nets[channel] = priv;
+ if (ops->dev_init_channel) {
+ err = ops->dev_init_channel(priv);
+ if (err)
+ goto candev_free;
+ }
+
+ err = kvaser_usb_devlink_port_register(priv);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register devlink port\n");
+ goto candev_free;
+ }
+
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
+ goto unregister_devlink_port;
}
netdev_dbg(netdev, "device registered\n");
return 0;
+
+unregister_devlink_port:
+ kvaser_usb_devlink_port_unregister(priv);
+candev_free:
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
}
static int kvaser_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct kvaser_usb *dev;
+ struct devlink *devlink;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
- dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
return -ENODEV;
- }
+ devlink = devlink_alloc(&kvaser_usb_devlink_ops, sizeof(*dev), &intf->dev);
+ if (!devlink)
+ return -ENOMEM;
+
+ dev = devlink_priv(devlink);
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
- dev_err(&intf->dev, "Cannot get usb endpoint(s)");
- return err;
+ dev_err_probe(&intf->dev, err, "Cannot get usb endpoint(s)");
+ goto free_devlink;
}
dev->udev = interface_to_usbdev(intf);
@@ -769,64 +956,67 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
- dev_err(&intf->dev,
- "Failed to initialize card, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Failed to initialize card\n");
+ goto free_devlink;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get software info, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software info\n");
+ goto free_devlink;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get software details, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get software details\n");
+ goto free_devlink;
}
}
- if (WARN_ON(!dev->cfg))
- return -ENODEV;
-
- dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
- ((dev->fw_version >> 24) & 0xff),
- ((dev->fw_version >> 16) & 0xff),
- (dev->fw_version & 0xffff));
+ if (WARN_ON(!dev->cfg)) {
+ err = -ENODEV;
+ goto free_devlink;
+ }
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
- dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get card info\n");
+ goto free_devlink;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
- dev_err(&intf->dev,
- "Cannot get capabilities, error %d\n", err);
- kvaser_usb_remove_interfaces(dev);
- return err;
+ dev_err_probe(&intf->dev, err,
+ "Cannot get capabilities\n");
+ goto remove_interfaces;
}
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
- if (err) {
- kvaser_usb_remove_interfaces(dev);
- return err;
- }
+ err = kvaser_usb_init_one(dev, i);
+ if (err)
+ goto remove_interfaces;
}
+ devlink_register(devlink);
return 0;
+
+remove_interfaces:
+ kvaser_usb_remove_interfaces(dev);
+free_devlink:
+ devlink_free(devlink);
+
+ return err;
}
static void kvaser_usb_disconnect(struct usb_interface *intf)
@@ -839,10 +1029,12 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
return;
kvaser_usb_remove_interfaces(dev);
+ devlink_unregister(priv_to_devlink(dev));
+ devlink_free(priv_to_devlink(dev));
}
static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
+ .name = KBUILD_MODNAME,
.probe = kvaser_usb_probe,
.disconnect = kvaser_usb_disconnect,
.id_table = kvaser_usb_table,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
new file mode 100644
index 000000000000..e838b82298ae
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/* kvaser_usb devlink functions
+ *
+ * Copyright (C) 2025 KVASER AB, Sweden. All rights reserved.
+ */
+#include "kvaser_usb.h"
+
+#include <linux/netdevice.h>
+#include <net/devlink.h>
+
+#define KVASER_USB_EAN_MSB 0x00073301
+
+static int kvaser_usb_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct kvaser_usb *dev = devlink_priv(devlink);
+ char buf[] = "73301XXXXXXXXXX";
+ int ret;
+
+ if (dev->serial_number) {
+ snprintf(buf, sizeof(buf), "%u", dev->serial_number);
+ ret = devlink_info_serial_number_put(req, buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->fw_version.major) {
+ snprintf(buf, sizeof(buf), "%u.%u.%u",
+ dev->fw_version.major,
+ dev->fw_version.minor,
+ dev->fw_version.build);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->hw_revision) {
+ snprintf(buf, sizeof(buf), "%u", dev->hw_revision);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->ean[1] == KVASER_USB_EAN_MSB) {
+ snprintf(buf, sizeof(buf), "%x%08x", dev->ean[1], dev->ean[0]);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
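For illustration only, with hypothetical values (serial_number 10545, firmware 3.27.0, hw_revision 4, EAN words 0x00073301/0x30002425), `devlink dev info` on such a device would report approximately:

  serial_number 10545
  versions:
      running:
        fw 3.27.0
      fixed:
        board.rev 4
        board.id 7330130002425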
+
+const struct devlink_ops kvaser_usb_devlink_ops = {
+ .info_get = kvaser_usb_devlink_info_get,
+};
+
+int kvaser_usb_devlink_port_register(struct kvaser_usb_net_priv *priv)
+{
+ int ret;
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ .phys.port_number = priv->channel,
+ };
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+
+ ret = devlink_port_register(priv_to_devlink(priv->dev),
+ &priv->devlink_port, priv->channel);
+ if (ret)
+ return ret;
+
+ SET_NETDEV_DEVLINK_PORT(priv->netdev, &priv->devlink_port);
+
+ return 0;
+}
+
+void kvaser_usb_devlink_port_unregister(struct kvaser_usb_net_priv *priv)
+{
+ devlink_port_unregister(&priv->devlink_port);
+}
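SET_NETDEV_DEVLINK_PORT() above ties each CAN netdev to its devlink port, so `devlink port` enumerates one physical port per channel, numbered with the same channel index the driver assigns to netdev->dev_port.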
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dcee8dc828ec..a59f20dad692 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -10,9 +10,9 @@
* - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
* reported after a call to do_get_berr_counter(), since firmware does not
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
- * - Hardware timestamps are not set for CAN Tx frames.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -44,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
/* Minihydra command IDs */
#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_BUSPARAMS_REQ 17
+#define CMD_GET_BUSPARAMS_RESP 18
#define CMD_GET_CHIP_STATE_REQ 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_DRIVERMODE_REQ 21
@@ -65,6 +68,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
#define CMD_SET_BUSPARAMS_RESP 85
#define CMD_GET_CAPABILITIES_REQ 95
#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_RX_MESSAGE 106
#define CMD_MAP_CHANNEL_REQ 200
#define CMD_MAP_CHANNEL_RESP 201
@@ -109,7 +114,7 @@ struct kvaser_cmd_card_info {
__le32 clock_res;
__le32 mfg_date;
__le32 ean[2];
- u8 hw_version;
+ u8 hw_revision;
u8 usb_mode;
u8 hw_type;
u8 reserved0;
@@ -195,21 +200,42 @@ struct kvaser_cmd_chip_state_event {
#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
struct kvaser_cmd_set_busparams {
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 nsamples;
+ struct kvaser_usb_busparams busparams_nominal;
u8 reserved0[4];
- __le32 bitrate_d;
- u8 tseg1_d;
- u8 tseg2_d;
- u8 sjw_d;
- u8 nsamples_d;
+ struct kvaser_usb_busparams busparams_data;
u8 canfd_mode;
u8 reserved1[7];
} __packed;
+/* Busparam type */
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
+struct kvaser_cmd_get_busparams_req {
+ u8 type;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_cmd_get_busparams_res {
+ struct kvaser_usb_busparams busparams;
+ u8 reserved[20];
+} __packed;
+
+/* The device has two LEDs per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_HYDRA_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX 3
+#define KVASER_USB_HYDRA_LEDS_PER_CHANNEL 2
+struct kvaser_cmd_led_action_req {
+ u8 action;
+ u8 padding;
+ __le16 duration_ms;
+ u8 reserved[24];
+} __packed;
+
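Worked example (hedged: assumes KVASER_USB_LED_ON is 0 and KVASER_USB_LED_OFF is 1, matching the LSB encoding above): turning ON the yellow LED of channel 1 uses index 3 + 2 * 1 = 5, so the action byte becomes

	action = KVASER_USB_LED_ON |
		 FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK, 5);	/* (5 << 1) | 0 = 0x0a */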
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -253,6 +279,15 @@ struct kvaser_cmd_tx_can {
u8 reserved[11];
} __packed;
+struct kvaser_cmd_tx_ack {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 reserved0[8];
+} __packed;
+
struct kvaser_cmd_header {
u8 cmd_no;
/* The destination HE address is stored in 0..5 of he_addr.
@@ -280,6 +315,10 @@ struct kvaser_cmd {
struct kvaser_cmd_error_event error_event;
struct kvaser_cmd_set_busparams set_busparams_req;
+ struct kvaser_cmd_get_busparams_req get_busparams_req;
+ struct kvaser_cmd_get_busparams_res get_busparams_res;
+
+ struct kvaser_cmd_led_action_req led_action_req;
struct kvaser_cmd_chip_state_event chip_state_event;
@@ -287,6 +326,7 @@ struct kvaser_cmd {
struct kvaser_cmd_rx_can rx_can;
struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_tx_ack tx_ack;
} __packed;
} __packed;
@@ -295,6 +335,7 @@ struct kvaser_cmd {
#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
@@ -361,6 +402,10 @@ struct kvaser_cmd_ext {
} __packed;
} __packed;
+struct kvaser_usb_net_hydra_priv {
+ int pending_get_busparams_type;
+};
+
static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.name = "kvaser_usb_kcan",
.tseg1_min = 1,
@@ -373,7 +418,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -507,36 +552,40 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
return priv;
}
-static ktime_t
-kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
- const struct kvaser_cmd *cmd)
+static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
{
- u64 ticks;
+ ktime_t hwtstamp = 0;
if (cmd->header.cmd_no == CMD_EXTENDED) {
struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
- ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
- } else {
- ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp);
+ else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp);
+ } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp);
+ } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp);
}
- return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+ return hwtstamp;
}
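The kvaser_usb_timestamp{48,64}_to_ktime() helpers are assumed to live in kvaser_usb.h; a minimal sketch consistent with the inline math this hunk removes (timestamp_freq is in MHz, so one tick is 1000/timestamp_freq ns):

static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
						u64 ticks)
{
	/* ticks are 1/timestamp_freq us wide -> convert to ns */
	return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
}

static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
						      const __le16 *timestamp)
{
	u64 ticks = le16_to_cpu(timestamp[0]) |
		    (u64)le16_to_cpu(timestamp[1]) << 16 |
		    (u64)le16_to_cpu(timestamp[2]) << 32;

	return kvaser_usb_ticks_to_ktime(cfg, ticks);
}

static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
						      __le64 timestamp)
{
	return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
}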
static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
u8 cmd_no, int channel)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
if (channel < 0) {
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -553,7 +602,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -569,21 +618,22 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
{
struct kvaser_cmd *cmd;
struct kvaser_usb *dev = priv->dev;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd_async(priv, cmd,
- kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
if (err)
kfree(cmd);
@@ -692,7 +742,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -727,24 +777,26 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
u32 value = 0;
u32 mask = 0;
u16 cap_cmd_res;
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -838,6 +890,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
complete(&priv->flush_comp);
}
+static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ hydra = priv->sub_priv;
+ if (!hydra)
+ return;
+
+ switch (hydra->pending_get_busparams_type) {
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
+ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
+		memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
+		       sizeof(priv->busparams_data));
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
+ hydra->pending_get_busparams_type);
+ break;
+ }
+ hydra->pending_get_busparams_type = -1;
+
+ complete(&priv->get_busparams_comp);
+}
+
static void
kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
u8 bus_status,
@@ -862,6 +947,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
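Worked example: if the channel enters ERROR_PASSIVE with bec->txerr = 136 and bec->rxerr = 0, tx_state resolves to the new state while rx_state stays CAN_STATE_ERROR_ACTIVE, so can_change_state() attributes the passive condition to the transmitter; for any state short of bus-off the counters are also mirrored into cf->data[6]/data[7] of the generated error frame.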
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -869,7 +990,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
struct net_device *netdev = priv->netdev;
struct can_frame *cf;
struct sk_buff *skb;
- struct net_device_stats *stats;
enum can_state new_state, old_state;
old_state = priv->can.state;
@@ -888,41 +1008,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
-
- stats = &netdev->stats;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
@@ -1015,9 +1105,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1033,51 +1122,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
@@ -1109,8 +1173,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
}
stats->tx_errors++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1120,8 +1182,11 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_tx_urb_context *context;
struct kvaser_usb_net_priv *priv;
unsigned long irq_flags;
+ unsigned int len;
bool one_shot_fail = false;
+ bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct sk_buff *skb;
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
if (!priv)
@@ -1139,24 +1204,31 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
one_shot_fail = true;
}
+
+ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
+ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
}
context = &priv->tx_contexts[transid % dev->max_tx_urbs];
- if (!one_shot_fail) {
- struct net_device_stats *stats = &priv->netdev->stats;
-
- stats->tx_packets++;
- stats->tx_bytes += can_fd_dlc2len(context->dlc);
- }
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
+ len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+
+ if (!one_shot_fail && !is_err_frame) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ }
}
static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
@@ -1177,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
flags = cmd->rx_can.flags;
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1206,15 +1278,17 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
kvaser_usb_can_rx_over_error(priv->netdev);
- cf->len = can_cc_dlc2len(cmd->rx_can.dlc);
+ can_frame_set_cc_len((struct can_frame *)cf, cmd->rx_can.dlc, priv->can.ctrlmode);
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1243,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
KVASER_USB_KCAN_DATA_DLC_SHIFT;
flags = le32_to_cpu(cmd->rx_can.flags);
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1283,16 +1357,18 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
cf->flags |= CANFD_ESI;
} else {
- cf->len = can_cc_dlc2len(dlc);
+ can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->can.ctrlmode);
}
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1316,6 +1392,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
kvaser_usb_hydra_state_event(dev, cmd);
break;
+ case CMD_GET_BUSPARAMS_RESP:
+ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
+ break;
+
case CMD_ERROR_EVENT:
kvaser_usb_hydra_error_event(dev, cmd);
break;
@@ -1331,6 +1411,7 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
/* Ignored commands */
case CMD_SET_BUSPARAMS_RESP:
case CMD_SET_BUSPARAMS_FD_RESP:
+ case CMD_LED_ACTION_RESP:
break;
default:
@@ -1371,22 +1452,20 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd_ext *cmd;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 dlc = can_fd_len2dlc(cf->len);
+ u8 dlc;
u8 nbr_of_bytes = cf->len;
u32 flags;
u32 id;
u32 kcan_id;
u32 kcan_header;
- *frame_len = nbr_of_bytes;
-
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1404,6 +1483,11 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
cmd->len = cpu_to_le16(*cmd_len);
+ if (can_is_canfd_skb(skb))
+ dlc = can_fd_len2dlc(cf->len);
+ else
+ dlc = can_get_cc_dlc((struct can_frame *)cf, priv->can.ctrlmode);
+
cmd->tx_can.databytes = nbr_of_bytes;
cmd->tx_can.dlc = dlc;
@@ -1451,8 +1535,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
static void *
kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
@@ -1460,9 +1544,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- *frame_len = cf->len;
-
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1481,7 +1563,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
id = cf->can_id & CAN_SFF_MASK;
}
- cmd->tx_can.dlc = cf->len;
+ cmd->tx_can.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
flags = (cf->can_id & CAN_EFF_FLAG ?
KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
@@ -1495,7 +1577,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
cmd->tx_can.id = cpu_to_le32(id);
cmd->tx_can.flags = flags;
- memcpy(cmd->tx_can.data, cf->data, *frame_len);
+ memcpy(cmd->tx_can.data, cf->data, cf->len);
return cmd;
}
@@ -1516,61 +1598,101 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
return err;
}
-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ int busparams_type)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int err;
+
+ if (!hydra)
+ return -EINVAL;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ cmd->get_busparams_req.type = busparams_type;
+ hydra->pending_get_busparams_type = busparams_type;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+	err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+	if (err)
+		goto end;
+
+	if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+					 msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+		err = -ETIMEDOUT;
+
+end:
+	kfree(cmd);
+
+	return err;
+}
+
+static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
+}
+
+static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
+}
+
+static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = bt->prop_seg + bt->phase_seg1;
- int tseg2 = bt->phase_seg2;
- int sjw = bt->sjw;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
- cmd->set_busparams_req.sjw = (u8)sjw;
- cmd->set_busparams_req.tseg1 = (u8)tseg1;
- cmd->set_busparams_req.tseg2 = (u8)tseg2;
- cmd->set_busparams_req.nsamples = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ sizeof(cmd->set_busparams_req.busparams_nominal));
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
}
-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
- int tseg2 = dbt->phase_seg2;
- int sjw = dbt->sjw;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
- cmd->set_busparams_req.sjw_d = (u8)sjw;
- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
- cmd->set_busparams_req.nsamples_d = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ sizeof(cmd->set_busparams_req.busparams_data));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1586,7 +1708,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
@@ -1677,6 +1799,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
+ if (!hydra)
+ return -ENOMEM;
+
+ priv->sub_priv = hydra;
+
+ return 0;
+}
+
static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -1701,15 +1836,18 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
u32 flags;
+ u32 fw_version;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->sw_detail_req.use_ext_cmd = 1;
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -1717,7 +1855,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -1726,7 +1864,10 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
if (err)
goto end;
- dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
flags = le32_to_cpu(cmd->sw_detail_res.sw_flags);
if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) {
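The three KVASER_USB_SW_VERSION_*_MASK values are defined in kvaser_usb.h and are not part of this diff; judging from the dev_dbg() format this patch removes (major in bits 31-24, minor in 23-16, build in 15-0), they are expected to be:

#define KVASER_USB_SW_VERSION_MAJOR_MASK GENMASK(31, 24)
#define KVASER_USB_SW_VERSION_MINOR_MASK GENMASK(23, 16)
#define KVASER_USB_SW_VERSION_BUILD_MASK GENMASK(15, 0)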
@@ -1777,6 +1918,10 @@ static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev)
err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd);
if (err)
return err;
+ dev->ean[1] = le32_to_cpu(cmd.card_info.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.card_info.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.card_info.serial_number);
+ dev->hw_revision = cmd.card_info.hw_revision;
dev->nchannels = cmd.card_info.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES)
@@ -1831,10 +1976,41 @@ static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_LED_ACTION_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he(cmd, dev->card_data.hydra.sysdbg_he);
+ kvaser_usb_hydra_set_cmd_transid(cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+ cmd->led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_HYDRA_LED_IDX_MASK,
+ KVASER_USB_HYDRA_LED_YELLOW_CH0_IDX +
+ KVASER_USB_HYDRA_LEDS_PER_CHANNEL * priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ kfree(cmd);
+
+ return ret;
+}
+
static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
if ((priv->can.ctrlmode &
@@ -1845,11 +2021,12 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
@@ -1859,7 +2036,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
else
cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
@@ -1869,7 +2046,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@@ -1887,7 +2064,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()
@@ -1910,7 +2087,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
@@ -2003,17 +2180,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
void *buf;
if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
- buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len,
+ transid);
else
- buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len,
+ transid);
return buf;
}
@@ -2021,14 +2198,18 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_set_mode = kvaser_usb_hydra_set_mode,
.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
.dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_init_channel = kvaser_usb_hydra_init_channel,
.dev_get_software_info = kvaser_usb_hydra_get_software_info,
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities,
+ .dev_set_led = kvaser_usb_hydra_set_led,
.dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode,
.dev_start_chip = kvaser_usb_hydra_start_chip,
.dev_stop_chip = kvaser_usb_hydra_stop_chip,
@@ -2040,7 +2221,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 80,
.bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
@@ -2049,15 +2230,15 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.clock = {
- .freq = 24000000,
+ .freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 24,
.bittiming_const = &kvaser_usb_hydra_rt_bittiming_c,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 59ba7c7beec0..1167d38344f1 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -10,6 +10,7 @@
* Copyright (C) 2015 Valeo S.A.
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/gfp.h>
@@ -19,7 +20,9 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -28,10 +31,6 @@
#include "kvaser_usb.h"
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2
/* Command header size */
@@ -59,6 +58,9 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CMD_RX_EXT_MESSAGE 14
#define CMD_TX_EXT_MESSAGE 15
#define CMD_SET_BUS_PARAMS 16
+#define CMD_GET_BUS_PARAMS 17
+#define CMD_GET_BUS_PARAMS_REPLY 18
+#define CMD_GET_CHIP_STATE 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_CTRL_MODE 21
#define CMD_RESET_CHIP 24
@@ -73,13 +75,26 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_ERROR_EVENT 45
#define CMD_FLUSH_QUEUE 48
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_LED_ACTION_REQ 101
+#define CMD_LED_ACTION_RESP 102
#define CMD_LEAF_LOG_MESSAGE 106
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
+#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
+
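A hedged sketch of how these frequency bits are expected to select one of the device configs defined later in this file (shown for the M32C-based Leaf flavour; the actual selection sits in the software-info handler, which is outside this hunk):

	switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
	case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
		dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
		break;
	case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
		dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
		break;
	case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
		dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
		break;
	}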
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -98,16 +113,6 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -117,6 +122,10 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* USBCanII timestamp */
+#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16)
+#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10
+
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
@@ -129,7 +138,7 @@ struct kvaser_cmd_cardinfo {
__le32 padding0;
__le32 clock_resolution;
__le32 mfgdate;
- u8 ean[8];
+ __le32 ean[2];
u8 hw_revision;
union {
struct {
@@ -164,11 +173,22 @@ struct usbcan_cmd_softinfo {
struct kvaser_cmd_busparams {
u8 tid;
u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
+ struct kvaser_usb_busparams busparams;
+} __packed;
+
+/* The device has one LED per CAN channel.
+ * The LSB of the action field controls the state:
+ * 0 = ON
+ * 1 = OFF
+ * The remaining bits of the action field hold the LED index.
+ */
+#define KVASER_USB_LEAF_LED_IDX_MASK GENMASK(31, 1)
+#define KVASER_USB_LEAF_LED_YELLOW_CH0_IDX 2
+struct kvaser_cmd_led_action_req {
+ u8 tid;
+ u8 action;
+ __le16 duration_ms;
+ u8 padding[24];
} __packed;
struct kvaser_cmd_tx_can {
@@ -237,7 +257,21 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
-struct leaf_cmd_error_event {
+struct leaf_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 padding[2];
+} __packed;
+
+struct usbcan_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time;
+ u8 padding[2];
+} __packed;
+
+struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
__le16 time[3];
@@ -249,7 +283,7 @@ struct leaf_cmd_error_event {
u8 error_factor;
} __packed;
-struct usbcan_cmd_error_event {
+struct usbcan_cmd_can_error_event {
u8 tid;
u8 padding;
u8 tx_errors_count_ch0;
@@ -261,6 +295,34 @@ struct usbcan_cmd_error_event {
__le16 time;
} __packed;
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
+#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 timestamp[3];
+ __le16 padding;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+ __le16 timestamp;
+ __le16 padding;
+} __packed;
+
+struct usbcan_cmd_clk_overflow_event {
+ u8 tid;
+ u8 padding;
+ __le32 time;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -285,6 +347,28 @@ struct leaf_cmd_log_message {
u8 data[8];
} __packed;
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
+struct kvaser_cmd_cap_req {
+ __le16 padding0;
+ __le16 cap_cmd;
+ __le16 padding1;
+ __le16 channel;
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
+#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 padding;
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+} __packed;
+
struct kvaser_cmd {
u8 len;
u8 id;
@@ -293,6 +377,8 @@ struct kvaser_cmd {
struct kvaser_cmd_cardinfo cardinfo;
struct kvaser_cmd_busparams busparams;
+ struct kvaser_cmd_led_action_req led_action_req;
+
struct kvaser_cmd_rx_can_header rx_can_header;
struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header;
@@ -300,15 +386,22 @@ struct kvaser_cmd {
struct leaf_cmd_softinfo softinfo;
struct leaf_cmd_rx_can rx_can;
struct leaf_cmd_chip_state_event chip_state_event;
- struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_can_error_event can_error_event;
struct leaf_cmd_log_message log_message;
+ struct leaf_cmd_error_event error_event;
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
+ struct leaf_cmd_tx_acknowledge tx_ack;
} __packed leaf;
union {
struct usbcan_cmd_softinfo softinfo;
struct usbcan_cmd_rx_can rx_can;
struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
+ struct usbcan_cmd_tx_acknowledge tx_ack;
+ struct usbcan_cmd_clk_overflow_event clk_overflow_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
@@ -317,6 +410,44 @@ struct kvaser_cmd {
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
+ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
+ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
+ /* ignored events: */
+ [CMD_LED_ACTION_RESP] = CMD_SIZE_ANY,
+};
+
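For example, a Leaf-family CMD_RX_STD_MESSAGE is only accepted if it carries at least CMD_HEADER_LEN + sizeof(u.leaf.rx_can) bytes; entries marked CMD_SIZE_ANY bypass the length check, and commands missing from the table are rejected outright (see kvaser_usb_leaf_verify_size() below).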
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -340,18 +471,151 @@ struct kvaser_usb_err_summary {
};
};
+struct kvaser_usb_net_leaf_priv {
+ struct kvaser_usb_net_priv *net;
+
+ struct delayed_work chip_state_req_work;
+
+ /* started but not reported as bus-on yet */
+ bool joining_bus;
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
+ .clock = {
+ .freq = 8 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 24 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 32 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
+};
+
+static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev,
+ __le16 timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp) |
+ dev->card_data.usbcan_timestamp_msb;
+
+ return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR);
+}
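+
+/* The Usbcan family sends only the low 16 bits of the tick counter per
+ * message; the upper bits are latched from each
+ * CMD_USBCAN_CLOCK_OVERFLOW_EVENT into card_data.usbcan_timestamp_msb
+ * and ORed back in above. A minimal sketch of that reconstruction, with
+ * illustrative values (the helper name is invented):
+ *
+ *	static u64 sketch_rebuild_ticks(u32 latched_msb, __le16 wire_ts)
+ *	{
+ *		// e.g. latched_msb = 0x00030000, wire_ts = 0x1234
+ *		// -> ticks = 0x00031234, then scaled by
+ *		// KVASER_USB_USBCAN_TIMESTAMP_FACTOR
+ *		return (u64)latched_msb | le16_to_cpu(wire_ts);
+ *	}
+ */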
+
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
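+
+/* Each table entry above is the payload size of the matching union
+ * member, so the full on-wire minimum is that plus CMD_HEADER_LEN (the
+ * len and id bytes). A worked example with an illustrative size: if
+ * sizeof(struct leaf_cmd_tx_acknowledge) were 10, a Leaf
+ * CMD_TX_ACKNOWLEDGE would need cmd->len >= CMD_HEADER_LEN + 10, or it
+ * is dropped with -EIO. Entries set to CMD_SIZE_ANY bypass the check.
+ */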
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
u8 *cmd_tx_can_flags = NULL; /* GCC */
struct can_frame *cf = (struct can_frame *)skb->data;
- *frame_len = cf->len;
-
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd) {
cmd->u.tx_can.tid = transid & 0xff;
@@ -359,7 +623,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -383,7 +647,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
}
- cmd->u.tx_can.data[5] = cf->len;
+ cmd->u.tx_can.data[5] = can_get_cc_dlc(cf, priv->can.ctrlmode);
memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len);
if (cf->can_id & CAN_RTR_FLAG)
@@ -421,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
- pos = round_up(pos,
+ pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
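+			/* round_up() is a no-op on an already-aligned pos, so
+			 * without the "+ 1" a zero-length command at a packet
+			 * boundary would never advance. Illustrative values,
+			 * assuming wMaxPacketSize == 64:
+			 *   round_up(128, 64)     == 128  (stuck)
+			 *   round_up(128 + 1, 64) == 192  (next packet)
+			 */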
@@ -447,6 +711,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -471,10 +738,57 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
return rc;
}
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ const struct leaf_cmd_softinfo *softinfo)
+{
+ u32 fw_version;
+ u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+ fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK, fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK, fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK, fw_version);
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
+ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
+
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for a 16 MHz
+ * clock, regardless of the actual clock frequency.
+ * The reported frequency is only used for timestamps.
+ */
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
+ break;
+ }
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
+ }
+}
+
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
int err;
+ u32 fw_version;
err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0);
if (err)
@@ -484,16 +798,21 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
- dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
+ dev->fw_version.major = FIELD_GET(KVASER_USB_SW_VERSION_MAJOR_MASK,
+ fw_version);
+ dev->fw_version.minor = FIELD_GET(KVASER_USB_SW_VERSION_MINOR_MASK,
+ fw_version);
+ dev->fw_version.build = FIELD_GET(KVASER_USB_SW_VERSION_BUILD_MASK,
+ fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -532,13 +851,155 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
+ dev->ean[1] = le32_to_cpu(cmd.u.cardinfo.ean[1]);
+ dev->ean[0] = le32_to_cpu(cmd.u.cardinfo.ean[0]);
+ dev->serial_number = le32_to_cpu(cmd.u.cardinfo.serial_number);
+ dev->hw_revision = cmd.u.cardinfo.hw_revision;
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_GET_CAPABILITIES_REQ;
+ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
+
+ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
+ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
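+
+/* The reply carries one bit per channel in "value", qualified by
+ * "mask"; a capability counts only where both are set. Hypothetical
+ * reply on a two-channel device: mask = 0x3, value = 0x1, so
+ * value & mask = 0x1 and only channel 0 (BIT(0)) gains the capability.
+ */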
+
+static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade device firmware.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
return 0;
}
+static int kvaser_usb_leaf_set_led(struct kvaser_usb_net_priv *priv,
+ enum kvaser_usb_led_state state,
+ u16 duration_ms)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int ret;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_LED_ACTION_REQ;
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_led_action_req);
+ cmd->u.led_action_req.tid = 0xff;
+
+ cmd->u.led_action_req.duration_ms = cpu_to_le16(duration_ms);
+ cmd->u.led_action_req.action = state |
+ FIELD_PREP(KVASER_USB_LEAF_LED_IDX_MASK,
+ KVASER_USB_LEAF_LED_YELLOW_CH0_IDX +
+ priv->channel);
+
+ ret = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ kfree(cmd);
+
+ return ret;
+}
+
+static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
+{
+ int err = 0;
+
+ if (dev->driver_info->family == KVASER_LEAF)
+ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
+
+ return err;
+}
+
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -547,6 +1008,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
+ struct sk_buff *skb;
+ ktime_t hwtstamp = 0;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
@@ -567,17 +1030,15 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
- struct sk_buff *skb;
+ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
+ struct sk_buff *err_skb;
struct can_frame *cf;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
+ err_skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (err_skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
+ netif_rx(err_skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -588,13 +1049,23 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
-
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time);
+ break;
+ case KVASER_USBCAN:
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time);
+ break;
+ }
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(priv->netdev,
+ context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
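+/* The echo skb is stamped before can_get_echo_skb() hands it back to
+ * the stack, so the hardware TX timestamp reaches userspace. A hedged
+ * userspace sketch (standard SO_TIMESTAMPING API, not part of this
+ * patch): enable raw hardware timestamps on a SocketCAN socket and the
+ * stamp arrives in the SCM_TIMESTAMPING cmsg, entry ts[2]:
+ *
+ *	int val = SOF_TIMESTAMPING_RX_HARDWARE |
+ *		  SOF_TIMESTAMPING_RAW_HARDWARE;
+ *	setsockopt(s, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+ */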
@@ -623,11 +1094,22 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return err;
}
+static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
+{
+ struct kvaser_usb_net_leaf_priv *leaf =
+ container_of(work, struct kvaser_usb_net_leaf_priv,
+ chip_state_req_work.work);
+ struct kvaser_usb_net_priv *priv = leaf->net;
+
+ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
+}
+
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_err_summary *es,
struct can_frame *cf)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
struct kvaser_usb *dev = priv->dev;
struct net_device_stats *stats = &priv->netdev->stats;
enum can_state cur_state, new_state, tx_state, rx_state;
@@ -641,20 +1123,32 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state = CAN_STATE_BUS_OFF;
} else if (es->status & M16C_STATE_BUS_PASSIVE) {
new_state = CAN_STATE_ERROR_PASSIVE;
- } else if (es->status & M16C_STATE_BUS_ERROR) {
+ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
+ cur_state >= CAN_STATE_BUS_OFF) {
/* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if (es->txerr >= 128 || es->rxerr >= 128)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->txerr >= 96 || es->rxerr >= 96)
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
+ } else if (es->txerr >= 128 || es->rxerr >= 128) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->txerr >= 96 || es->rxerr >= 96) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
}
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
+ /* 0bfd:0124 FW 4.18.778 was observed to send the initial
+ * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF
+ * bit set if the channel was bus-off when it was last stopped (even
+ * across chip resets). This bit will clear shortly afterwards, without
+ * triggering a second unsolicited chip state event.
+ * Ignore this initial bus-off.
+ */
+ if (leaf->joining_bus) {
+ if (new_state == CAN_STATE_BUS_OFF) {
netdev_dbg(priv->netdev, "ignoring bus-off during startup\n");
+ new_state = cur_state;
+ } else {
+ leaf->joining_bus = false;
+ }
+ }
if (new_state != cur_state) {
tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
@@ -664,11 +1158,11 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
}
if (priv->can.restart_ms &&
- cur_state >= CAN_STATE_BUS_OFF &&
+ cur_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -692,12 +1186,11 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_leaf_priv *leaf;
enum can_state old_state, new_state;
if (es->channel >= dev->nchannels) {
@@ -707,28 +1200,29 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
priv = dev->nets[es->channel];
+ leaf = priv->sub_priv;
stats = &priv->netdev->stats;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
+ /* Ignore e.g. state change to bus-off reported just after stopping */
+ if (!netif_running(priv->netdev))
+ return;
+
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
+ /* If there are errors, request status updates periodically as we do
+ * not get automatic notifications of improved state.
+ * Also request updates if we saw a stale BUS_OFF during startup
+ * (joining_bus).
+ */
+ if (new_state < CAN_STATE_BUS_OFF &&
+ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE ||
+ leaf->joining_bus))
+ schedule_delayed_work(&leaf->chip_state_req_work,
+ msecs_to_jiffies(500));
if (new_state != old_state) {
if (es->status &
@@ -740,14 +1234,23 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
+ old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
- switch (dev->card_data.leaf.family) {
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -774,11 +1277,12 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -838,11 +1342,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
case CMD_CAN_ERROR_EVENT:
es.channel = 0;
- es.status = cmd->u.usbcan.error_event.status_ch0;
- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.status = cmd->u.usbcan.can_error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch1;
+ cmd->u.usbcan.can_error_event.status_ch1;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
/* The USBCAN firmware supports up to 2 channels.
@@ -850,13 +1354,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
*/
if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
es.channel = 1;
- es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.status = cmd->u.usbcan.can_error_event.status_ch1;
es.txerr =
- cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
es.rxerr =
- cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch0;
+ cmd->u.usbcan.can_error_event.status_ch0;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
}
break;
@@ -873,11 +1377,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
switch (cmd->id) {
case CMD_CAN_ERROR_EVENT:
- es.channel = cmd->u.leaf.error_event.channel;
- es.status = cmd->u.leaf.error_event.status;
- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ es.channel = cmd->u.leaf.can_error_event.channel;
+ es.status = cmd->u.leaf.can_error_event.status;
+ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
break;
case CMD_LEAF_LOG_MESSAGE:
es.channel = cmd->u.leaf.log_message.channel;
@@ -928,6 +1432,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
+ ktime_t hwtstamp = 0;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
@@ -939,7 +1444,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -955,12 +1460,14 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time);
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time);
break;
}
@@ -970,7 +1477,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -978,7 +1485,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
else
cf->can_id &= CAN_SFF_MASK;
- cf->len = can_cc_dlc2len(cmd->u.leaf.log_message.dlc);
+ can_frame_set_cc_len(cf, cmd->u.leaf.log_message.dlc & 0xF, priv->can.ctrlmode);
if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
@@ -996,7 +1503,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
cf->can_id |= CAN_EFF_FLAG;
}
- cf->len = can_cc_dlc2len(rx_data[5]);
+ can_frame_set_cc_len(cf, rx_data[5] & 0xF, priv->can.ctrlmode);
if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
@@ -1004,11 +1511,81 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
memcpy(cf->data, &rx_data[6], cf->len);
}
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
+static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u16 info1 = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
+ break;
+ case KVASER_USBCAN:
+ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
+ break;
+ }
+
+ /* info1 will contain the offending cmd_no */
+ switch (info1) {
+ case CMD_SET_CTRL_MODE:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_CTRL_MODE error in parameter\n");
+ break;
+
+ case CMD_SET_BUS_PARAMS:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_BUS_PARAMS error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ info1);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 error_code = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ error_code = cmd->u.leaf.error_event.error_code;
+ break;
+ case KVASER_USBCAN:
+ error_code = cmd->u.usbcan.error_event.error_code;
+ break;
+ }
+
+ switch (error_code) {
+ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
+ /* Received an additional CAN message when the firmware TX queue
+ * is already full. Something is wrong with the driver.
+ * This should never happen!
+ */
+ dev_err(&dev->intf->dev,
+ "Received error event TX_QUEUE_FULL\n");
+ break;
+ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
+ kvaser_usb_leaf_error_event_parameter(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n", error_code);
+ break;
+ }
+}
+
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -1049,9 +1626,31 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
complete(&priv->stop_comp);
}
-static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.busparams.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
+ sizeof(priv->busparams_nominal));
+
+ complete(&priv->get_busparams_comp);
+}
+
+static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1067,14 +1666,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1084,16 +1683,29 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_tx_acknowledge(dev, cmd);
break;
- /* Ignored commands */
+ case CMD_ERROR_EVENT:
+ kvaser_usb_leaf_error_event(dev, cmd);
+ break;
+
+ case CMD_GET_BUS_PARAMS_REPLY:
+ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
+ break;
+
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
+ dev->card_data.usbcan_timestamp_msb =
+ le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) &
+ KVASER_USB_USBCAN_CLK_OVERFLOW_MASK;
break;
+ /* Ignored commands */
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
+ case CMD_LED_ACTION_RESP:
+ break;
default:
warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id);
@@ -1120,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
- pos = round_up(pos, le16_to_cpu
+ pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
@@ -1162,9 +1774,12 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
- init_completion(&priv->start_comp);
+ leaf->joining_bus = true;
+
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@@ -1180,9 +1795,12 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
+
+ cancel_delayed_work(&leaf->chip_state_req_work);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
@@ -1225,28 +1843,40 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
return 0;
}
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
+static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf;
+
+ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
+ if (!leaf)
+ return -ENOMEM;
+
+ leaf->net = priv;
+ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
+ kvaser_usb_leaf_chip_state_req_work);
-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ priv->sub_priv = leaf;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+
+ if (leaf)
+ cancel_delayed_work_sync(&leaf->chip_state_req_work);
+}
+
+static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
int rc;
@@ -1259,15 +1889,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
cmd->u.busparams.channel = priv->channel;
cmd->u.busparams.tid = 0xff;
- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- cmd->u.busparams.sjw = bt->sjw;
- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- cmd->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- cmd->u.busparams.no_samp = 3;
- else
- cmd->u.busparams.no_samp = 1;
+ memcpy(&cmd->u.busparams.busparams, busparams,
+ sizeof(cmd->u.busparams.busparams));
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
@@ -1275,17 +1898,45 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
return rc;
}
+static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ if (priv->dev->driver_info->family == KVASER_USBCAN)
+ return -EOPNOTSUPP;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
enum can_mode mode)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
+ leaf->joining_bus = true;
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
@@ -1332,14 +1983,19 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_set_mode = kvaser_usb_leaf_set_mode,
.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
.dev_set_data_bittiming = NULL,
+ .dev_get_data_busparams = NULL,
.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
.dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_init_channel = kvaser_usb_leaf_init_channel,
+ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
.dev_get_software_info = kvaser_usb_leaf_get_software_info,
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
- .dev_get_capabilities = NULL,
+ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_led = kvaser_usb_leaf_set_led,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
@@ -1348,11 +2004,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
- .clock = {
- .freq = CAN_USB_CLOCK,
- },
- .timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index a1a154c08b7f..41c0a1c399bf 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -6,11 +6,11 @@
* This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -33,10 +33,6 @@
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
-/* MCBA endpoint numbers */
-#define MCBA_USB_EP_IN 1
-#define MCBA_USB_EP_OUT 1
-
/* Microchip command id */
#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
@@ -51,6 +47,10 @@
#define MCBA_VER_REQ_USB 1
#define MCBA_VER_REQ_CAN 2
+/* Drive the CAN_RES signal low ("0") to activate the termination resistors R24 and R25 */
+#define MCBA_VER_TERMINATION_ON 0
+#define MCBA_VER_TERMINATION_OFF 1
+
#define MCBA_SIDL_EXID_MASK 0x8
#define MCBA_DLC_MASK 0xf
#define MCBA_DLC_RTR_MASK 0x40
@@ -64,7 +64,6 @@
struct mcba_usb_ctx {
struct mcba_priv *priv;
u32 ndx;
- u8 dlc;
bool can;
};
@@ -84,6 +83,8 @@ struct mcba_priv {
atomic_t free_ctx_cnt;
void *rxbuf[MCBA_MAX_RX_URBS];
dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ int rx_pipe;
+ int tx_pipe;
};
/* CAN frame */
@@ -184,13 +185,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
ctx = &priv->tx_context[i];
ctx->ndx = i;
- if (cf) {
+ if (cf)
ctx->can = true;
- ctx->dlc = cf->len;
- } else {
+ else
ctx->can = false;
- ctx->dlc = 0;
- }
atomic_dec(&priv->free_ctx_cnt);
break;
@@ -236,10 +234,8 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
return;
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += ctx->dlc;
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
+ NULL);
}
if (urb->status)
@@ -272,10 +268,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
- usb_fill_bulk_urb(urb, priv->udev,
- usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
- MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
- ctx);
+ usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
+ mcba_usb_write_bulk_callback, ctx);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
@@ -321,7 +315,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
.cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
};
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
ctx = mcba_usb_get_free_ctx(priv, cf);
@@ -368,7 +362,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
xmit_failed:
can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
mcba_usb_free_ctx(ctx);
- dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
@@ -450,17 +443,17 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
cf->can_id = (sid & 0xffe0) >> 5;
}
- if (msg->dlc & MCBA_DLC_RTR_MASK)
- cf->can_id |= CAN_RTR_FLAG;
-
cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);
- memcpy(cf->data, msg->data, cf->len);
+ if (msg->dlc & MCBA_DLC_RTR_MASK) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
}
@@ -474,7 +467,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
priv->usb_ka_first_pass = false;
}
- if (msg->termination_state)
+ if (msg->termination_state == MCBA_VER_TERMINATION_ON)
priv->can.termination = MCBA_TERMINATION_ENABLED;
else
priv->can.termination = MCBA_TERMINATION_DISABLED;
@@ -611,7 +604,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
+ priv->rx_pipe,
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
@@ -656,7 +649,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ priv->rx_pipe,
buf, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -708,7 +701,6 @@ static int mcba_usb_open(struct net_device *netdev)
priv->can_speed_check = true;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
netif_start_queue(netdev);
return 0;
@@ -740,7 +732,6 @@ static int mcba_usb_close(struct net_device *netdev)
mcba_urb_unlink(priv);
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -772,6 +763,10 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_start_xmit = mcba_usb_start_xmit,
};
+static const struct ethtool_ops mcba_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Microchip CANBUS has hardcoded bittiming values by default.
* This function sends request via USB to change the speed and align bittiming
* values for presentation purposes only
@@ -794,9 +789,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term)
};
if (term == MCBA_TERMINATION_ENABLED)
- usb_msg.termination = 1;
+ usb_msg.termination = MCBA_VER_TERMINATION_ON;
else
- usb_msg.termination = 0;
+ usb_msg.termination = MCBA_VER_TERMINATION_OFF;
mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
@@ -810,6 +805,13 @@ static int mcba_usb_probe(struct usb_interface *intf,
struct mcba_priv *priv;
int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+
+ err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
+ if (err) {
+ dev_err(&intf->dev, "Can't find endpoints\n");
+ return err;
+ }
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
if (!netdev) {
@@ -843,6 +845,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->can.do_set_bittiming = mcba_net_set_bittiming;
netdev->netdev_ops = &mcba_netdev_ops;
+ netdev->ethtool_ops = &mcba_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -855,7 +858,8 @@ static int mcba_usb_probe(struct usb_interface *intf,
goto cleanup_free_candev;
}
- devm_can_led_init(netdev);
+ priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
+ priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
/* Start USB dev only if we have successfully registered CAN device */
err = mcba_usb_start(priv);
diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c
new file mode 100644
index 000000000000..dd6df2ec3742
--- /dev/null
+++ b/drivers/net/can/usb/nct6694_canfd.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/* SocketCAN driver for the Nuvoton NCT6694 CAN FD controller, attached via USB.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/ethtool.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define DEVICE_NAME "nct6694-canfd"
+
+/* USB command module type for NCT6694 CANfd controller.
+ * This defines the module type used for communication with the NCT6694
+ * CANfd controller over the USB interface.
+ */
+#define NCT6694_CANFD_MOD 0x05
+
+/* Command 00h - CAN Setting and Initialization */
+#define NCT6694_CANFD_SETTING 0x00
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL1 BIT(0)
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL2 BIT(1)
+#define NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP BIT(2)
+#define NCT6694_CANFD_SETTING_CTRL1_MON BIT(0)
+#define NCT6694_CANFD_SETTING_CTRL1_NISO BIT(1)
+#define NCT6694_CANFD_SETTING_CTRL1_LBCK BIT(2)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG2 GENMASK(6, 0)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG1 GENMASK(15, 8)
+#define NCT6694_CANFD_SETTING_NBTP_NBRP GENMASK(24, 16)
+#define NCT6694_CANFD_SETTING_NBTP_NSJW GENMASK(31, 25)
+#define NCT6694_CANFD_SETTING_DBTP_DSJW GENMASK(3, 0)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG2 GENMASK(7, 4)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG1 GENMASK(12, 8)
+#define NCT6694_CANFD_SETTING_DBTP_DBRP GENMASK(20, 16)
+#define NCT6694_CANFD_SETTING_DBTP_TDC BIT(23)
+
+/* Command 01h - CAN Information */
+#define NCT6694_CANFD_INFORMATION 0x01
+#define NCT6694_CANFD_INFORMATION_SEL 0x00
+
+/* Command 02h - CAN Event */
+#define NCT6694_CANFD_EVENT 0x02
+#define NCT6694_CANFD_EVENT_SEL(idx, mask) \
+ (((idx) ? 0x80 : 0x00) | ((mask) & 0x7F))
+
+#define NCT6694_CANFD_EVENT_MASK GENMASK(5, 0)
+#define NCT6694_CANFD_EVT_TX_FIFO_EMPTY BIT(7) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_LOST BIT(5) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_IN BIT(7) /* Read-clear */
+
+/* Command 10h - CAN Deliver */
+#define NCT6694_CANFD_DELIVER 0x10
+#define NCT6694_CANFD_DELIVER_SEL(buf_cnt) \
+ ((buf_cnt) & 0xFF)
+
+/* Command 11h - CAN Receive */
+#define NCT6694_CANFD_RECEIVE 0x11
+#define NCT6694_CANFD_RECEIVE_SEL(idx, buf_cnt) \
+ (((idx) ? 0x80 : 0x00) | ((buf_cnt) & 0x7F))
+
+#define NCT6694_CANFD_FRAME_TAG(idx) (0xC0 | (idx))
+#define NCT6694_CANFD_FRAME_FLAG_EFF BIT(0)
+#define NCT6694_CANFD_FRAME_FLAG_RTR BIT(1)
+#define NCT6694_CANFD_FRAME_FLAG_FD BIT(2)
+#define NCT6694_CANFD_FRAME_FLAG_BRS BIT(3)
+#define NCT6694_CANFD_FRAME_FLAG_ERR BIT(4)
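+
+/* Both selectors pack the controller index into the top bit(s) and a
+ * count or mask into the low bits. Worked examples (values
+ * illustrative):
+ *   NCT6694_CANFD_RECEIVE_SEL(1, 3) == 0x80 | 0x03 == 0x83
+ *	-> read up to 3 buffered frames from controller 1
+ *   NCT6694_CANFD_FRAME_TAG(0) == 0xc0
+ *	-> TX frame addressed to controller 0
+ */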
+
+#define NCT6694_NAPI_WEIGHT 32
+
+enum nct6694_event_err {
+ NCT6694_CANFD_EVT_ERR_NO_ERROR = 0,
+ NCT6694_CANFD_EVT_ERR_CRC_ERROR,
+ NCT6694_CANFD_EVT_ERR_STUFF_ERROR,
+ NCT6694_CANFD_EVT_ERR_ACK_ERROR,
+ NCT6694_CANFD_EVT_ERR_FORM_ERROR,
+ NCT6694_CANFD_EVT_ERR_BIT_ERROR,
+ NCT6694_CANFD_EVT_ERR_TIMEOUT_ERROR,
+ NCT6694_CANFD_EVT_ERR_UNKNOWN_ERROR,
+};
+
+enum nct6694_event_status {
+ NCT6694_CANFD_EVT_STS_ERROR_ACTIVE = 0,
+ NCT6694_CANFD_EVT_STS_ERROR_PASSIVE,
+ NCT6694_CANFD_EVT_STS_BUS_OFF,
+ NCT6694_CANFD_EVT_STS_WARNING,
+};
+
+struct __packed nct6694_canfd_setting {
+ __le32 nbr;
+ __le32 dbr;
+ u8 active;
+ u8 reserved[3];
+ __le16 ctrl1;
+ __le16 ctrl2;
+ __le32 nbtp;
+ __le32 dbtp;
+};
+
+struct __packed nct6694_canfd_information {
+ u8 tx_fifo_cnt;
+ u8 rx_fifo_cnt;
+ u8 reserved[2];
+ __le32 can_clk;
+};
+
+struct __packed nct6694_canfd_event {
+ u8 err;
+ u8 status;
+ u8 tx_evt;
+ u8 rx_evt;
+ u8 rec;
+ u8 tec;
+ u8 reserved[2];
+};
+
+struct __packed nct6694_canfd_frame {
+ u8 tag;
+ u8 flag;
+ u8 reserved;
+ u8 length;
+ __le32 id;
+ u8 data[CANFD_MAX_DLEN];
+};
+
+struct nct6694_canfd_priv {
+ struct can_priv can; /* must be the first member */
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+ struct nct6694 *nct6694;
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct nct6694_canfd_frame tx;
+ struct nct6694_canfd_frame rx;
+ struct nct6694_canfd_event event[2];
+ struct can_berr_counter bec;
+};
+
+static inline struct nct6694_canfd_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct nct6694_canfd_priv, offload);
+}
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_nominal_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_data_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static void nct6694_canfd_rx_offload(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ struct nct6694_canfd_priv *priv = rx_offload_to_priv(offload);
+ int ret;
+
+ ret = can_rx_offload_queue_tail(offload, skb);
+ if (ret)
+ priv->ndev->stats.rx_fifo_errors++;
+}
+
+static void nct6694_canfd_handle_lost_msg(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_dbg(ndev, "RX FIFO overflow, message(s) lost.\n");
+
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_rx(struct net_device *ndev, u8 rx_evt)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_frame *frame = &priv->rx;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_RECEIVE,
+ .sel = NCT6694_CANFD_RECEIVE_SEL(ndev->dev_port, 1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, frame);
+ if (ret)
+ return;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_FD) {
+ struct canfd_frame *cfd;
+
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cfd->can_id = le32_to_cpu(frame->id);
+ cfd->len = canfd_sanitize_len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cfd->can_id |= CAN_EFF_FLAG;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_ERR)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, frame->data, cfd->len);
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = le32_to_cpu(frame->id);
+ cf->len = can_cc_dlc2len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, frame->data, cf->len);
+ }
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static int nct6694_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static void nct6694_canfd_handle_state_change(struct net_device *ndev, u8 status)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ enum can_state new_state, rx_state, tx_state;
+ struct can_berr_counter bec;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ nct6694_canfd_get_berr_counter(ndev, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+
+ /* state hasn't changed */
+ if (new_state == priv->can.state)
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_bus_err(struct net_device *ndev, u8 bus_err)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (bus_err) {
+ case NCT6694_CANFD_EVT_ERR_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_BIT_ERROR:
+ netdev_dbg(ndev, "Bit error\n");
+ ndev->stats.tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_tx(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->tx_bytes += can_rx_offload_get_echo_skb_queue_tail(&priv->offload,
+ 0, NULL);
+ stats->tx_packets++;
+ netif_wake_queue(ndev);
+}
+
+static irqreturn_t nct6694_canfd_irq(int irq, void *data)
+{
+ struct net_device *ndev = data;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_event *event = &priv->event[ndev->dev_port];
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_EVENT,
+ .sel = NCT6694_CANFD_EVENT_SEL(ndev->dev_port, NCT6694_CANFD_EVENT_MASK),
+ .len = cpu_to_le16(sizeof(priv->event))
+ };
+ irqreturn_t handled = IRQ_NONE;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, priv->event);
+ if (ret < 0)
+ return handled;
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_IN) {
+ nct6694_canfd_handle_rx(ndev, event->rx_evt);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_LOST) {
+ nct6694_canfd_handle_lost_msg(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->status) {
+ nct6694_canfd_handle_state_change(ndev, event->status);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->err != NCT6694_CANFD_EVT_ERR_NO_ERROR) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ nct6694_canfd_handle_bus_err(ndev, event->err);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->tx_evt & NCT6694_CANFD_EVT_TX_FIFO_EMPTY) {
+ nct6694_canfd_handle_tx(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (handled)
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
+ priv->bec.rxerr = event->rec;
+ priv->bec.txerr = event->tec;
+
+ return handled;
+}
+
+static void nct6694_canfd_tx_work(struct work_struct *work)
+{
+ struct nct6694_canfd_priv *priv = container_of(work,
+ struct nct6694_canfd_priv,
+ tx_work);
+ struct nct6694_canfd_frame *frame = &priv->tx;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct sk_buff *skb = priv->can.echo_skb[0];
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_DELIVER,
+ .sel = NCT6694_CANFD_DELIVER_SEL(1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ u32 txid;
+ int err;
+
+ memset(frame, 0, sizeof(*frame));
+
+ frame->tag = NCT6694_CANFD_FRAME_TAG(ndev->dev_port);
+
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->flags & CANFD_BRS)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_BRS;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ txid = cfd->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cfd->can_id & CAN_SFF_MASK;
+ }
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_FD;
+ frame->id = cpu_to_le32(txid);
+ frame->length = canfd_sanitize_len(cfd->len);
+
+ memcpy(frame->data, cfd->data, frame->length);
+ } else {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ txid = cf->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_RTR;
+ else
+ memcpy(frame->data, cf->data, cf->len);
+
+ frame->id = cpu_to_le32(txid);
+ frame->length = cf->len;
+ }
+
+ err = nct6694_write_msg(priv->nct6694, &cmd_hd, frame);
+ if (err) {
+ can_free_echo_skb(ndev, 0, NULL);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+ netif_wake_queue(ndev);
+ }
+}
+
+static netdev_tx_t nct6694_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(ndev);
+ can_put_echo_skb(skb, ndev, 0, 0);
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int nct6694_canfd_start(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *n_bt = &priv->can.bittiming;
+ const struct can_bittiming *d_bt = &priv->can.fd.data_bittiming;
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+ u32 en_tdc;
+ int ret;
+
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return -ENOMEM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_NISO);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_LBCK);
+
+ /* Disable clock divider */
+ setting->ctrl2 = 0;
+
+ setting->nbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW,
+ n_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP,
+ n_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2,
+ n_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1,
+ n_bt->prop_seg + n_bt->phase_seg1 - 1));
+
+ if (d_bt->brp <= 2)
+ en_tdc = NCT6694_CANFD_SETTING_DBTP_TDC;
+ else
+ en_tdc = 0;
+
+ setting->dbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DSJW,
+ d_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DBRP,
+ d_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG2,
+ d_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG1,
+ d_bt->prop_seg + d_bt->phase_seg1 - 1) |
+ en_tdc);
+
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1 |
+ NCT6694_CANFD_SETTING_ACTIVE_CTRL2 |
+ NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP;
+
+ ret = nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+ if (ret)
+ return ret;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
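+
+/* The *BTP fields take the hardware encoding, i.e. each SocketCAN
+ * value minus one. Worked example with hypothetical nominal timing
+ * (brp = 5, prop_seg + phase_seg1 = 13, phase_seg2 = 2, sjw = 1):
+ *   NSJW   = 0   (bits 31:25)
+ *   NBRP   = 4   (bits 24:16)
+ *   NTSEG1 = 12  (bits 15:8)
+ *   NTSEG2 = 1   (bits 6:0)
+ * so nbtp == cpu_to_le32(0x00040c01).
+ */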
+
+static void nct6694_canfd_stop(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+
+ /* The NCT6694 cannot be stopped. To ensure safe operation and avoid
+ * interference, the control mode is set to Listen-Only mode. This
+ * mode allows the device to monitor bus activity without actively
+ * participating in communication.
+ */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return;
+
+ nct6694_read_msg(priv->nct6694, &cmd_hd, setting);
+ setting->ctrl1 = cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1;
+ nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int nct6694_canfd_close(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ nct6694_canfd_stop(ndev);
+ destroy_workqueue(priv->wq);
+ free_irq(ndev->irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return 0;
+}
+
+static int nct6694_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ return ret;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int nct6694_canfd_open(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = open_candev(ndev);
+ if (ret)
+ return ret;
+
+ can_rx_offload_enable(&priv->offload);
+
+ ret = request_threaded_irq(ndev->irq, NULL,
+ nct6694_canfd_irq, IRQF_ONESHOT,
+ "nct6694_canfd", ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to request IRQ\n");
+ goto can_rx_offload_disable;
+ }
+
+ priv->wq = alloc_ordered_workqueue("%s-nct6694_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ ndev->name);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ goto destroy_wq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(priv->wq);
+free_irq:
+ free_irq(ndev->irq, ndev);
+can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return ret;
+}
+
+static const struct net_device_ops nct6694_canfd_netdev_ops = {
+ .ndo_open = nct6694_canfd_open,
+ .ndo_stop = nct6694_canfd_close,
+ .ndo_start_xmit = nct6694_canfd_start_xmit,
+};
+
+static const struct ethtool_ops nct6694_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int nct6694_canfd_get_clock(struct nct6694_canfd_priv *priv)
+{
+ struct nct6694_canfd_information *info __free(kfree) = NULL;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_INFORMATION,
+ .sel = NCT6694_CANFD_INFORMATION_SEL,
+ .len = cpu_to_le16(sizeof(*info))
+ };
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, info);
+ if (ret)
+ return ret;
+
+ return le32_to_cpu(info->can_clk);
+}
+
+static int nct6694_canfd_probe(struct platform_device *pdev)
+{
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct nct6694_canfd_priv *priv;
+ struct net_device *ndev;
+ int port, irq, ret, can_clk;
+
+ port = ida_alloc(&nct6694->canfd_ida, GFP_KERNEL);
+ if (port < 0)
+ return port;
+
+ irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_CAN0 + port);
+ if (!irq) {
+ ret = -EINVAL;
+ goto free_ida;
+ }
+
+ ndev = alloc_candev(sizeof(struct nct6694_canfd_priv), 1);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ ndev->dev_port = port;
+ ndev->netdev_ops = &nct6694_canfd_netdev_ops;
+ ndev->ethtool_ops = &nct6694_canfd_ethtool_ops;
+
+ priv = netdev_priv(ndev);
+ priv->nct6694 = nct6694;
+ priv->ndev = ndev;
+
+ can_clk = nct6694_canfd_get_clock(priv);
+ if (can_clk < 0) {
+ ret = dev_err_probe(&pdev->dev, can_clk,
+ "Failed to get clock\n");
+ goto free_candev;
+ }
+
+ INIT_WORK(&priv->tx_work, nct6694_canfd_tx_work);
+
+ priv->can.clock.freq = can_clk;
+ priv->can.bittiming_const = &nct6694_canfd_bittiming_nominal_const;
+ priv->can.fd.data_bittiming_const = &nct6694_canfd_bittiming_data_const;
+ priv->can.do_set_mode = nct6694_canfd_set_mode;
+ priv->can.do_get_berr_counter = nct6694_canfd_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ ret = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (ret)
+ goto free_candev;
+
+ ret = can_rx_offload_add_manual(ndev, &priv->offload,
+ NCT6694_NAPI_WEIGHT);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to add rx_offload\n");
+ goto free_candev;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ SET_NETDEV_DEV(priv->ndev, &pdev->dev);
+
+ ret = register_candev(priv->ndev);
+ if (ret)
+ goto rx_offload_del;
+
+ return 0;
+
+rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+free_candev:
+ free_candev(ndev);
+dispose_irq:
+ irq_dispose_mapping(irq);
+free_ida:
+ ida_free(&nct6694->canfd_ida, port);
+ return ret;
+}
+
+static void nct6694_canfd_remove(struct platform_device *pdev)
+{
+ struct nct6694_canfd_priv *priv = platform_get_drvdata(pdev);
+ struct nct6694 *nct6694 = priv->nct6694;
+ struct net_device *ndev = priv->ndev;
+ int port = ndev->dev_port;
+ int irq = ndev->irq;
+
+ unregister_candev(ndev);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+ irq_dispose_mapping(irq);
+ ida_free(&nct6694->canfd_ida, port);
+}
+
+static struct platform_driver nct6694_canfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = nct6694_canfd_probe,
+ .remove = nct6694_canfd_remove,
+};
+
+module_platform_driver(nct6694_canfd_driver);
+
+MODULE_DESCRIPTION("USB-CAN FD driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
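
A worked example makes the NBTP packing above easier to check. This sketch assumes a hypothetical 80 MHz CAN clock and a 500 kbit/s nominal rate; the chosen bit-timing values (brp=4, prop_seg+phase_seg1=31, phase_seg2=8, sjw=4) are illustrative, not taken from the NCT6694 datasheet. Note that the device expects each field minus one:

	u32 nbtp = FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW, 4 - 1) |
		   FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP, 4 - 1) |
		   FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2, 8 - 1) |
		   FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1, 31 - 1);

	/* 80 MHz / brp 4 = 20 MHz time-quantum clock; 1 (sync) + 31 + 8 =
	 * 40 tq per bit, so 20 MHz / 40 = 500 kbit/s nominal bit rate.
	 */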
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 837b3fecd71e..9278a1522aae 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -3,15 +3,17 @@
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/unaligned.h>
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -317,7 +319,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
*/
static void pcan_usb_restart(struct timer_list *t)
{
- struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
+ struct pcan_usb *pdev = timer_container_of(pdev, t, restart_timer);
struct peak_usb_device *dev = &pdev->dev;
/* notify candev and netdev */
@@ -380,23 +382,42 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
}
/*
- * read device id from device
+ * read can channel id from device
*/
-static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id)
{
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
- netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ netdev_err(dev->netdev, "getting can channel id failure: %d\n", err);
else
- *device_id = args[0];
+ *can_ch_id = args[0];
return err;
}
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+
+ /* this kind of device supports 8-bit values only */
+ if (can_ch_id > U8_MAX)
+ return -EINVAL;
+
+	/* during the flash process the device disconnects for ~1.25 s:
+	 * prohibit access while the interface is UP
+ */
+ if (dev->netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ args[0] = can_ch_id;
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args);
+}
+
/*
* update current time ref with received timestamp
*/
@@ -505,6 +526,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
/* Supply TX/RX error counters in case of
* controller error.
*/
+ cf->can_id = CAN_ERR_CNT;
cf->data[6] = mc->pdev->bec.txerr;
cf->data[7] = mc->pdev->bec.rxerr;
}
@@ -520,8 +542,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
&hwts->hwtstamp);
}
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -534,7 +554,7 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
{
struct pcan_usb *pdev = mc->pdev;
- /* acccording to the content of the packet */
+ /* according to the content of the packet */
switch (ir) {
case PCAN_USB_ERR_CNT_DEC:
case PCAN_USB_ERR_CNT_INC:
@@ -678,15 +698,16 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
/* Ignore next byte (client private id) if SRR bit is set */
if (can_id_flags & PCAN_USB_TX_SRR)
mc->ptr++;
+
+ /* update statistics */
+ mc->netdev->stats.rx_bytes += cf->len;
}
+ mc->netdev->stats.rx_packets++;
/* convert timestamp into kernel time */
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
- /* update statistics */
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
/* push the skb */
netif_rx(skb);
@@ -841,14 +862,14 @@ static int pcan_usb_start(struct peak_usb_device *dev)
pdev->bec.rxerr = 0;
pdev->bec.txerr = 0;
- /* be notified on error counter changes (if requested by user) */
- if (dev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
- err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
- if (err)
- netdev_warn(dev->netdev,
- "Asking for BERR reporting error %u\n",
- err);
- }
+ /* always ask the device for BERR reporting, to be able to switch from
+ * WARNING to PASSIVE state
+ */
+ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
+ if (err)
+ netdev_warn(dev->netdev,
+ "Asking for BERR reporting error %u\n",
+ err);
/* if revision greater than 3, can put silent mode on/off */
if (dev->device_rev > 3) {
@@ -883,6 +904,11 @@ static int pcan_usb_init(struct peak_usb_device *dev)
return err;
}
+ dev_info(dev->netdev->dev.parent,
+ "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
+ pcan_usb.name, dev->device_rev, serial_number,
+ pcan_usb.ctrl_count);
+
/* Since rev 4.1, PCAN-USB is able to make single-shot as well as
* looped back frames.
*/
@@ -893,14 +919,9 @@ static int pcan_usb_init(struct peak_usb_device *dev)
CAN_CTRLMODE_LOOPBACK;
} else {
dev_info(dev->netdev->dev.parent,
- "Firmware update available. Please contact support@peak-system.com\n");
+ "Firmware update available. Please contact support.peak@hms-networks.com\n");
}
- dev_info(dev->netdev->dev.parent,
- "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n",
- pcan_usb.name, dev->device_rev, serial_number,
- pcan_usb.ctrl_count);
-
return 0;
}
@@ -962,8 +983,18 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
return err;
}
+/* This device only handles an 8-bit CAN channel id. */
+static int pcan_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u8);
+}
+
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = pcan_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -986,7 +1017,6 @@ const struct peak_usb_adapter pcan_usb = {
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2,
@@ -1016,7 +1046,8 @@ const struct peak_usb_adapter pcan_usb = {
.dev_init = pcan_usb_init,
.dev_set_bus = pcan_usb_write_mode,
.dev_set_bittiming = pcan_usb_set_bittiming,
- .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_set_can_channel_id,
.dev_decode_buf = pcan_usb_decode_buf,
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
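
The CAN_ERR_CNT hunk above follows the error-frame convention where data[6]/data[7] only carry the bus error counters when that flag is set in can_id. A detached sketch of the convention, assuming a CAN net_device and a counter snapshot are at hand; the warning state in data[1] is just an example:

	static void example_report_counters(struct net_device *netdev,
					    const struct can_berr_counter *bec)
	{
		struct can_frame *cf;
		struct sk_buff *skb = alloc_can_err_skb(netdev, &cf);

		if (!skb)
			return;

		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
		cf->data[1] = CAN_ERR_CRTL_TX_WARNING;
		cf->data[6] = bec->txerr;	/* only valid with CAN_ERR_CNT */
		cf->data[7] = bec->rxerr;
		netif_rx(skb);
	}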
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 6107fef9f4a0..cf48bb26d46d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -3,18 +3,20 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/usb.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -22,7 +24,7 @@
#include "pcan_usb_core.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
@@ -53,11 +55,31 @@ static const struct usb_device_id peak_usb_table[] = {
MODULE_DEVICE_TABLE(usb, peak_usb_table);
+static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct peak_usb_device *peak_dev = netdev_priv(netdev);
+
+ return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id);
+}
+static DEVICE_ATTR_RO(can_channel_id);
+
+/* mutable to avoid cast in attribute_group */
+static struct attribute *peak_usb_sysfs_attrs[] = {
+ &dev_attr_can_channel_id.attr,
+ NULL,
+};
+
+static const struct attribute_group peak_usb_sysfs_group = {
+ .name = "peak_usb",
+ .attrs = peak_usb_sysfs_attrs,
+};
+
/*
* dump memory
*/
#define DUMP_WIDTH 16
-void pcan_dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(const char *prompt, const void *p, int l)
{
pr_info("%s dumping %s (%d bytes):\n",
PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -89,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
- delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}
@@ -192,19 +214,6 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
}
}
-/*
- * post received skb after having set any hw timestamp
- */
-int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low)
-{
- struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
-
- peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp);
-
- return netif_rx(skb);
-}
-
/* post received skb with native 64-bit hw timestamp */
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high)
{
@@ -291,6 +300,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
struct peak_tx_urb_context *context = urb->context;
struct peak_usb_device *dev;
struct net_device *netdev;
+ int tx_bytes;
BUG_ON(!context);
@@ -305,10 +315,6 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
/* check tx status */
switch (urb->status) {
case 0:
- /* transmission complete */
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->data_len;
-
/* prevent tx timeout */
netif_trans_update(netdev);
break;
@@ -327,12 +333,17 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
- /* do wakeup tx queue in case of success only */
- if (!urb->status)
+ if (!urb->status) {
+ /* transmission complete */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += tx_bytes;
+
+ /* do wakeup tx queue in case of success only */
netif_wake_queue(netdev);
+ }
}
/*
@@ -344,13 +355,12 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
struct peak_usb_device *dev = netdev_priv(netdev);
struct peak_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct urb *urb;
u8 *obuf;
int i, err;
size_t size = dev->adapter->tx_buffer_size;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
@@ -378,9 +388,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
context->echo_index = i;
- /* Note: this works with CANFD frames too */
- context->data_len = cfd->len;
-
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, context->echo_index, 0);
@@ -763,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
const struct peak_usb_adapter *pa = dev->adapter;
if (pa->dev_set_data_bittiming) {
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
int err = pa->dev_set_data_bittiming(dev, bt);
if (err)
@@ -777,13 +784,127 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
+static int peak_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ config->tx_type = HWTSTAMP_TX_OFF;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+
+ return 0;
+}
+
+static int peak_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "Only RX HWTSTAMP_FILTER_ALL is supported");
+ return -ERANGE;
+}
+
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+ .ndo_hwtstamp_get = peak_hwtstamp_get,
+ .ndo_hwtstamp_set = peak_hwtstamp_set,
};
+/* CAN-USB devices generally handle 32-bit CAN channel IDs.
+ * In case one doesn't, it has to override this function.
+ */
+int peak_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u32);
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used
+ * here to fill the data buffer with the user defined CAN channel ID.
+ */
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err)
+ return err;
+
+ /* ethtool operates on individual bytes. The byte order of the CAN
+ * channel id in memory depends on the kernel architecture. We
+ * convert the CAN channel id back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len);
+
+ /* update cached value */
+ dev->can_channel_id = ch_id;
+ return err;
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id()
+ * operations. They are used here to set the new user defined CAN channel ID.
+ */
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ /* first, read the current user defined CAN channel ID */
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err);
+ return err;
+ }
+
+	/* update the value with the user-given bytes.
+ * ethtool operates on individual bytes. The byte order of the CAN
+ * channel ID in memory depends on the kernel architecture. We
+ * convert the CAN channel ID back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len);
+ ch_id = le32_to_cpu(ch_id_le);
+
+ /* flash the new value now */
+ err = dev->adapter->dev_set_can_channel_id(dev, ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n",
+ err);
+ return err;
+ }
+
+ /* update cached value with the new one */
+ dev->can_channel_id = ch_id;
+
+ return 0;
+}
+
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
/*
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
@@ -829,8 +950,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
- dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
@@ -842,6 +963,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
/* add ethtool support */
netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+ /* register peak_usb sysfs files */
+ netdev->sysfs_groups[0] = &peak_usb_sysfs_group;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -882,12 +1006,11 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
goto adap_dev_free;
}
- /* get device number early */
- if (dev->adapter->dev_get_device_id)
- dev->adapter->dev_get_device_id(dev, &dev->device_number);
+ /* get CAN channel id early */
+ dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id);
- netdev_info(netdev, "attached to %s channel %u (device %u)\n",
- peak_usb_adapter->name, ctrl_idx, dev->device_number);
+ netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->can_channel_id);
return 0;
@@ -923,9 +1046,9 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
- unregister_netdev(netdev);
+ unregister_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
@@ -1013,7 +1136,7 @@ static void __exit peak_usb_exit(void)
int err;
/* last chance do send any synchronous commands here */
- err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL,
+ err = driver_for_each_device(&peak_usb_driver.driver, NULL,
NULL, peak_usb_do_device_exit);
if (err)
pr_err("%s: failed to stop all can devices (err %d)\n",
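
The cpu_to_le32()/le32_to_cpu() round trip in the eeprom helpers above is what keeps partial ethtool writes endian-safe. A worked sketch with hypothetical values, patching the single byte 0xAB at eeprom offset 1 of a channel id that currently reads 0:

	static u32 example_patch_channel_id(u32 ch_id)
	{
		__le32 ch_id_le = cpu_to_le32(ch_id);
		u8 byte = 0xAB;	/* hypothetical user write at offset 1 */

		memcpy((u8 *)&ch_id_le + 1, &byte, 1);

		/* from ch_id == 0 this yields 0x0000AB00 on big- and
		 * little-endian hosts alike
		 */
		return le32_to_cpu(ch_id_le);
	}

Without the round trip, the same ethtool command would land the byte at a different position depending on host endianness.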
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index daa19f57e742..d1c1897d47b9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -60,7 +60,8 @@ struct peak_usb_adapter {
int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
- int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id);
+ int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
u8 *obuf, size_t *size);
@@ -99,7 +100,6 @@ struct peak_time_ref {
struct peak_tx_urb_context {
struct peak_usb_device *dev;
u32 echo_index;
- u8 data_len;
struct urb *urb;
};
@@ -123,7 +123,8 @@ struct peak_usb_device {
u8 *cmd_buf;
struct usb_anchor rx_submitted;
- u32 device_number;
+ /* equivalent to the device ID in the Windows API */
+ u32 can_channel_id;
u8 device_rev;
u8 ep_msg_in;
@@ -133,7 +134,7 @@ struct peak_usb_device {
struct peak_usb_device *next_siblings;
};
-void pcan_dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(const char *prompt, const void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
@@ -141,10 +142,15 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
-int peak_usb_netif_rx(struct sk_buff *skb,
- struct peak_time_ref *time_ref, u32 ts_low);
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
-
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
+
+/* common 32-bit CAN channel ID ethtool management */
+int peak_usb_get_eeprom_len(struct net_device *netdev);
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
#endif
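
Taken together with the sysfs attribute added in pcan_usb_core.c, these declarations expose the channel id twice: read-only under net/<iface>/peak_usb/can_channel_id and read-write through the ethtool eeprom calls. A hypothetical user-space sketch reading the sysfs file (the interface name can0 is an assumption):

	#include <stdio.h>

	int main(void)
	{
		char id[16];
		FILE *f = fopen("/sys/class/net/can0/peak_usb/can_channel_id", "r");

		if (f && fgets(id, sizeof(id), f))
			printf("CAN channel id: %s", id);	/* "%08X\n" format */
		if (f)
			fclose(f);
		return 0;
	}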
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 6bd12549f101..be84191cde56 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -2,12 +2,13 @@
/*
* CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
*
- * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2013-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -33,6 +34,10 @@
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
+/* struct pcan_ufd_fw_info::type */
+#define PCAN_USBFD_TYPE_STD 1
+#define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */
+
/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
@@ -44,6 +49,13 @@ struct __packed pcan_ufd_fw_info {
__le32 dev_id[2]; /* "device id" per CAN */
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
+
+ /* extended data when type >= PCAN_USBFD_TYPE_EXT */
+ u8 cmd_out_ep; /* ep for cmd */
+ u8 cmd_in_ep; /* ep for replies */
+ u8 data_out_ep[2]; /* ep for CANx TX */
+ u8 data_in_ep; /* ep for CAN RX */
+ u8 dummy[3];
};
/* handle device specific info used by the netdevices */
@@ -136,6 +148,15 @@ struct __packed pcan_ufd_ovr_msg {
u8 unused[3];
};
+#define PCAN_UFD_CMD_DEVID_SET 0x81
+
+struct __packed pcan_ufd_device_id {
+ __le16 opcode_channel;
+
+ u16 unused;
+ __le32 device_id;
+};
+
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
return om->channel & 0xf;
@@ -171,6 +192,9 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
/* send PCAN-USB Pro FD commands synchronously */
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
@@ -200,7 +224,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
- PCAN_USBPRO_EP_CMDOUT),
+ fw_info->cmd_out_ep),
packet_ptr, packet_len,
NULL, PCAN_UFD_CMD_TIMEOUT_MS);
if (err) {
@@ -220,6 +244,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
return err;
}
+static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev,
+ struct pcan_ufd_fw_info *fw_info)
+{
+ return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ fw_info,
+ sizeof(*fw_info));
+}
+
/* build the commands list in the given buffer, to enter operational mode */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
@@ -420,12 +453,43 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
return pcan_usb_fd_send_cmd(dev, ++cmd);
}
+/* read user CAN channel id from device */
+static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
+{
+ int err;
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+
+ err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info);
+ if (err)
+ return err;
+
+ *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ return err;
+}
+
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PCAN_UFD_CMD_DEVID_SET);
+ cmd->device_id = cpu_to_le32(can_ch_id);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
/* handle restart but in asynchronously way
* (uses PCAN-USB Pro code to complete asynchronous request)
*/
static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
struct urb *urb, u8 *buf)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
u8 *pc = buf;
/* build the entire cmds list in the provided buffer, to go back into
@@ -439,7 +503,7 @@ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
/* complete the URB */
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep),
buf, pc - buf,
pcan_usb_pro_restart_complete, dev);
@@ -507,13 +571,13 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cfd->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cfd->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cfd->data, rm->d, cfd->len);
-
+ netdev->stats.rx_bytes += cfd->len;
+ }
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cfd->len;
peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
le32_to_cpu(rm->ts_high));
@@ -577,9 +641,6 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cf->len;
-
peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
le32_to_cpu(sm->ts_high));
@@ -842,6 +903,15 @@ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
return 0;
}
+/* probe function for all PCAN-USB FD family usb interfaces */
+static int pcan_usb_fd_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *iface_desc = &intf->altsetting[0];
+
+ /* CAN interface is always interface #0 */
+ return iface_desc->desc.bInterfaceNumber;
+}
+
/* stop interface (last chance before set bus off) */
static int pcan_usb_fd_stop(struct peak_usb_device *dev)
{
@@ -863,6 +933,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
{
struct pcan_usb_fd_device *pdev =
container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info;
int i, err = -ENOMEM;
/* do this for 1st channel only */
@@ -881,10 +952,9 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* number of ts msgs to ignore before taking one into account */
pdev->usb_if->cm_ignore_count = 5;
- err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
- PCAN_USBPRO_INFO_FW,
- &pdev->usb_if->fw_info,
- sizeof(pdev->usb_if->fw_info));
+ fw_info = &pdev->usb_if->fw_info;
+
+ err = pcan_usb_fd_read_fwinfo(dev, fw_info);
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -898,14 +968,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
*/
dev_info(dev->netdev->dev.parent,
"PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
- dev->adapter->name, pdev->usb_if->fw_info.hw_version,
- pdev->usb_if->fw_info.fw_version[0],
- pdev->usb_if->fw_info.fw_version[1],
- pdev->usb_if->fw_info.fw_version[2],
+ dev->adapter->name, fw_info->hw_version,
+ fw_info->fw_version[0],
+ fw_info->fw_version[1],
+ fw_info->fw_version[2],
dev->adapter->ctrl_count);
/* check for ability to switch between ISO/non-ISO modes */
- if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ if (fw_info->fw_version[0] >= 2) {
/* firmware >= 2.x supports ISO/non-ISO switching */
dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
} else {
@@ -913,6 +983,15 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
+	/* if the vendor response type is greater than or equal to 2, it
+	 * contains the EP numbers to use for the command pipes. If not,
+	 * the default EPs should be used.
+ */
+ if (le16_to_cpu(fw_info->type) < PCAN_USBFD_TYPE_EXT) {
+ fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
+ fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
+ }
+
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
if (err) {
@@ -933,12 +1012,23 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* do a copy of the ctrlmode[_supported] too */
dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
+
+ fw_info = &pdev->usb_if->fw_info;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
- dev->device_number =
+ dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+	/* if the vendor response type is greater than or equal to 2, it
+	 * contains the EP numbers to use for the data pipes. If not, the
+	 * statically defined EPs are used (see peak_usb_create_dev()).
+ */
+ if (le16_to_cpu(fw_info->type) >= PCAN_USBFD_TYPE_EXT) {
+ dev->ep_msg_in = fw_info->data_in_ep;
+ dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
+ }
+
/* set clock domain */
for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
@@ -1035,6 +1125,10 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/* describes the PCAN-USB FD adapter */
@@ -1094,7 +1188,7 @@ const struct peak_usb_adapter pcan_usb_fd = {
.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
/* device callbacks */
- .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .intf_probe = pcan_usb_fd_probe,
.dev_init = pcan_usb_fd_init,
.dev_exit = pcan_usb_fd_exit,
@@ -1102,6 +1196,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1176,6 +1272,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1250,6 +1348,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1324,6 +1424,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 858ab22708fc..7be286293b1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -3,13 +3,13 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro.c
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -76,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = {
[PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
[PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
[PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid),
[PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
[PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
[PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
@@ -149,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
case PCAN_USBPRO_SETBTR:
case PCAN_USBPRO_GETDEVID:
+ case PCAN_USBPRO_SETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
*(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
@@ -419,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
return pcan_usb_pro_send_cmd(dev, &um);
}
-static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
- u32 *device_id)
+static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
{
struct pcan_usb_pro_devid *pdn;
struct pcan_usb_pro_msg um;
@@ -439,11 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->serial_num);
+ *can_ch_id = le32_to_cpu(pdn->dev_num);
return err;
}
+static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev,
+ u32 can_ch_id)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx,
+ can_ch_id);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
struct can_bittiming *bt)
{
@@ -536,17 +550,19 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
if (rx->flags & PCAN_USBPRO_EXT)
can_frame->can_id |= CAN_EFF_FLAG;
- if (rx->flags & PCAN_USBPRO_RTR)
+ if (rx->flags & PCAN_USBPRO_RTR) {
can_frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(can_frame->data, rx->data, can_frame->len);
+ netdev->stats.rx_bytes += can_frame->len;
+ }
+ netdev->stats.rx_packets++;
+
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
&hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -660,8 +676,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -1022,6 +1036,10 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1075,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.dev_free = pcan_usb_pro_free,
.dev_set_bus = pcan_usb_pro_set_bus,
.dev_set_bittiming = pcan_usb_pro_set_bittiming,
- .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id,
.dev_decode_buf = pcan_usb_pro_decode_buf,
.dev_encode_msg = pcan_usb_pro_encode_msg,
.dev_start = pcan_usb_pro_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 5d4cf14eb9d9..162c7546d3a8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PCAN_USB_PRO_H
#define PCAN_USB_PRO_H
@@ -62,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo {
#define PCAN_USBPRO_SETBTR 0x02
#define PCAN_USBPRO_SETBUSACT 0x04
#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETDEVID 0x06
#define PCAN_USBPRO_SETFILTR 0x0a
#define PCAN_USBPRO_SETTS 0x10
#define PCAN_USBPRO_GETDEVID 0x12
@@ -112,7 +113,7 @@ struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
__le16 dummy;
- __le32 serial_num;
+ __le32 dev_num;
};
#define PCAN_USBPRO_LED_DEVICE 0x00
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 1679cbe45ded..de61d9da99e3 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -28,6 +28,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -185,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -244,7 +245,8 @@ struct ucan_message_in {
/* CAN transmission complete
* (type == UCAN_IN_TX_COMPLETE)
*/
- struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t,
+ can_tx_complete_msg);
} __aligned(0x4) msg;
} __packed __aligned(0x4);
@@ -259,7 +261,6 @@ struct ucan_priv;
/* Context Information for transmission URBs */
struct ucan_urb_context {
struct ucan_priv *up;
- u8 dlc;
bool allocated;
};
@@ -276,7 +277,6 @@ struct ucan_priv {
/* linux USB device structures */
struct usb_device *udev;
- struct usb_interface *intf;
struct net_device *netdev;
/* lock for can->echo_skb (used around
@@ -284,7 +284,7 @@ struct ucan_priv {
*/
spinlock_t echo_skb_lock;
- /* usb device information information */
+ /* usb device information */
u8 intf_index;
u8 in_ep_addr;
u8 out_ep_addr;
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -621,8 +623,11 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
memcpy(cf->data, m->msg.can_msg.data, cf->len);
/* don't count error frames as real packets */
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
/* pass it to Linux */
netif_rx(skb);
@@ -634,7 +639,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
{
unsigned long flags;
u16 count, i;
- u8 echo_index, dlc;
+ u8 echo_index;
u16 len = le16_to_cpu(m->len);
struct ucan_urb_context *context;
@@ -658,7 +663,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* gather information from the context */
context = &up->context_array[echo_index];
- dlc = READ_ONCE(context->dlc);
/* Release context and restart queue if necessary.
* Also check if the context was allocated
@@ -671,8 +675,8 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
UCAN_TX_COMPLETE_SUCCESS) {
/* update statistics */
up->netdev->stats.tx_packets++;
- up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index, NULL);
+ up->netdev->stats.tx_bytes +=
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index, NULL);
@@ -1086,8 +1090,6 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
}
m->len = cpu_to_le16(mlen);
- context->dlc = cf->len;
-
m->subtype = echo_index;
/* build the urb */
@@ -1120,7 +1122,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
/* check skb */
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* allocate a context and slow down tx path, if fifo state is low */
@@ -1231,7 +1233,10 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_open = ucan_open,
.ndo_stop = ucan_close,
.ndo_start_xmit = ucan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* Request to set bittiming
@@ -1310,7 +1315,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1393,7 +1397,7 @@ static int ucan_probe(struct usb_interface *intf,
* Stage 3 for the final driver initialisation.
*/
- /* Prepare Memory for control transferes */
+ /* Prepare Memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
@@ -1496,7 +1500,6 @@ static int ucan_probe(struct usb_interface *intf,
/* initialize data */
up->udev = udev;
- up->intf = intf;
up->netdev = netdev;
up->intf_index = iface_desc->desc.bInterfaceNumber;
up->in_ep_addr = in_ep_addr;
@@ -1513,6 +1516,7 @@ static int ucan_probe(struct usb_interface *intf,
spin_lock_init(&up->context_lock);
spin_lock_init(&up->echo_skb_lock);
netdev->netdev_ops = &ucan_netdev_ops;
+ netdev->ethtool_ops = &ucan_ethtool_ops;
usb_set_intfdata(intf, up);
SET_NETDEV_DEV(netdev, &intf->dev);
@@ -1523,18 +1527,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero terminiation */
- strncpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload));
- firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1552,7 +1544,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
@@ -1576,7 +1571,7 @@ static void ucan_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (up) {
- unregister_netdev(up->netdev);
+ unregister_candev(up->netdev);
free_candev(up->netdev);
}
}
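
The statistics hunk in ucan_rx_can_msg() applies the convention used throughout this series: error frames are not counted as received packets, and RTR frames contribute no payload bytes. A detached sketch (function name hypothetical):

	static void example_count_rx(struct net_device *netdev,
				     const struct can_frame *cf)
	{
		if (cf->can_id & CAN_ERR_FLAG)
			return;		/* error frames are not real traffic */

		netdev->stats.rx_packets++;
		if (!(cf->can_id & CAN_RTR_FLAG))
			netdev->stats.rx_bytes += cf->len;
	}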
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d1b83bd1b3cb..7449328f7cd7 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -12,6 +12,7 @@
* who were very cooperative and answered my questions.
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -21,7 +22,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
/* driver constants */
#define MAX_RX_URBS 20
@@ -114,15 +114,12 @@ struct usb_8dev_tx_urb_context {
struct usb_8dev_priv *priv;
u32 echo_index;
- u8 dlc;
};
/* Structure to hold all of our device specific stuff */
struct usb_8dev_priv {
struct can_priv can; /* must be the first member */
- struct sk_buff *echo_skb[MAX_TX_URBS];
-
struct usb_device *udev;
struct net_device *netdev;
@@ -442,15 +439,15 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -476,16 +473,15 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
if (msg->flags & USB_8DEV_EXTID)
cf->can_id |= CAN_EFF_FLAG;
- if (msg->flags & USB_8DEV_RTR)
+ if (msg->flags & USB_8DEV_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->data, cf->len);
-
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
+ netif_rx(skb);
} else {
netdev_warn(priv->netdev, "frame type %d unknown",
msg->type);
@@ -584,11 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
urb->status);
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -610,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = sizeof(struct usb_8dev_tx_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -657,7 +649,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, priv->udev,
usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
@@ -670,9 +661,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
atomic_inc(&priv->active_tx_urbs);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- goto failed;
- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index, NULL);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+ stats->tx_dropped++;
+ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
/* Slow down tx path */
netif_stop_queue(netdev);
@@ -691,19 +693,6 @@ nofreecontext:
return NETDEV_TX_BUSY;
-failed:
- can_free_echo_skb(netdev, context->echo_index, NULL);
-
- usb_unanchor_urb(urb);
- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
- atomic_dec(&priv->active_tx_urbs);
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
-
nomembuf:
usb_free_urb(urb);
@@ -816,8 +805,6 @@ static int usb_8dev_open(struct net_device *netdev)
if (err)
return err;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
-
/* finally start device */
err = usb_8dev_start(priv);
if (err) {
@@ -874,8 +861,6 @@ static int usb_8dev_close(struct net_device *netdev)
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
-
return err;
}
@@ -883,11 +868,14 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_open = usb_8dev_open,
.ndo_stop = usb_8dev_close,
.ndo_start_xmit = usb_8dev_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops usb_8dev_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
};
static const struct can_bittiming_const usb_8dev_bittiming_const = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -943,6 +931,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
+ netdev->ethtool_ops = &usb_8dev_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -983,8 +972,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
(version>>8) & 0xff, version & 0xff);
}
- devm_can_led_init(netdev);
-
return 0;
cleanup_unregister_candev:
@@ -1015,7 +1002,7 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
}
static struct usb_driver usb_8dev_driver = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.probe = usb_8dev_probe,
.disconnect = usb_8dev_disconnect,
.id_table = usb_8dev_table,
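
Dropping context->dlc works because can_get_echo_skb() returns the data length of the echo skb it releases, so TX byte accounting can be done entirely at completion time. A minimal sketch of the pattern (helper name hypothetical):

	static void example_tx_done(struct net_device *netdev,
				    unsigned int echo_index)
	{
		/* releases the echoed skb and returns its data length */
		netdev->stats.tx_bytes += can_get_echo_skb(netdev, echo_index,
							   NULL);
		netdev->stats.tx_packets++;
	}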
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 067705e2850b..fdc662aea279 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -40,6 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -70,34 +71,36 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
+ unsigned int len;
int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
- stats->tx_bytes += cfd->len;
+ stats->tx_bytes += len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
+ skb_tx_timestamp(skb);
+
if (!echo) {
/* no echo handling available inside this driver */
if (loop) {
@@ -105,7 +108,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += len;
}
consume_skb(skb);
return NETDEV_TX_OK;
@@ -133,10 +136,11 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -145,10 +149,14 @@ static const struct net_device_ops vcan_netdev_ops = {
.ndo_change_mtu = vcan_change_mtu,
};
+static const struct ethtool_ops vcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
@@ -160,6 +168,7 @@ static void vcan_setup(struct net_device *dev)
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
+ dev->ethtool_ops = &vcan_ethtool_ops;
dev->needs_free_netdev = true;
}
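
vcan's byte counters now go through can_skb_get_data_len(), which must work for Classical CAN, CAN FD and CAN XL frames alike. A rough sketch of the idea, keyed off skb->len (an assumption for illustration; the real helper in include/linux/can/skb.h also accounts for RTR frames, whose data length is zero):

static unsigned int sketch_can_skb_data_len(const struct sk_buff *skb)
{
	if (skb->len == CAN_MTU)	/* Classical CAN: struct can_frame */
		return ((struct can_frame *)skb->data)->len;

	if (skb->len == CANFD_MTU)	/* CAN FD: struct canfd_frame */
		return ((struct canfd_frame *)skb->data)->len;

	/* Anything else is CAN XL, which has a variable-size payload. */
	return ((struct canxl_frame *)skb->data)->len;
}
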
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8861a7d875e7..b2c19f8c5f8e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -9,6 +9,7 @@
* Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -33,28 +34,34 @@ struct vxcan_priv {
struct net_device __rcu *peer;
};
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
- u8 len;
+ struct sk_buff *skb;
+ unsigned int len;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (unlikely(!peer)) {
- kfree_skb(skb);
+ kfree_skb(oskb);
dev->stats.tx_dropped++;
goto out_unlock;
}
- skb = can_create_echo_skb(skb);
- if (!skb)
+ skb_tx_timestamp(oskb);
+
+ skb = skb_clone(oskb, GFP_ATOMIC);
+ if (skb) {
+ consume_skb(oskb);
+ } else {
+ kfree_skb(oskb);
goto out_unlock;
+ }
/* reset CAN GW hop counter */
skb->csum_start = 0;
@@ -62,8 +69,8 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->len;
- if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ len = can_skb_get_data_len(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
@@ -112,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
@@ -124,10 +131,11 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -139,17 +147,22 @@ static const struct net_device_ops vxcan_netdev_ops = {
.ndo_change_mtu = vxcan_change_mtu,
};
+static const struct ethtool_ops vxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vxcan_setup(struct net_device *dev)
{
struct can_ml_priv *can_ml;
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
+ dev->ethtool_ops = &vxcan_ethtool_ops;
dev->needs_free_netdev = true;
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
@@ -159,13 +172,15 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
-static int vxcan_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxcan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxcan_priv *priv;
struct net_device *peer;
- struct net *peer_net;
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
char ifname[IFNAMSIZ];
@@ -175,19 +190,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
/* register peer device */
if (data && data[VXCAN_INFO_PEER]) {
- struct nlattr *nla_peer;
+ struct nlattr *nla_peer = data[VXCAN_INFO_PEER];
- nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = rtnl_nla_parse_ifla(peer_tb,
- nla_data(nla_peer) +
- sizeof(struct ifinfomsg),
- nla_len(nla_peer) -
- sizeof(struct ifinfomsg),
- NULL);
- if (err < 0)
- return err;
-
+ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ if (err < 0)
+ return err;
tbp = peer_tb;
}
@@ -199,23 +205,15 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
name_assign_type = NET_NAME_ENUM;
}
- peer_net = rtnl_link_get_net(net, tbp);
- if (IS_ERR(peer_net))
- return PTR_ERR(peer_net);
-
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
&vxcan_link_ops, tbp, extack);
- if (IS_ERR(peer)) {
- put_net(peer_net);
+ if (IS_ERR(peer))
return PTR_ERR(peer);
- }
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
err = register_netdevice(peer);
- put_net(peer_net);
- peer_net = NULL;
if (err < 0) {
free_netdev(peer);
return err;
@@ -223,7 +221,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
netif_carrier_off(peer);
- err = rtnl_configure_link(peer, ifmp);
+ err = rtnl_configure_link(peer, ifmp, 0, NULL);
if (err < 0)
goto unregister_network_device;
@@ -294,6 +292,7 @@ static struct rtnl_link_ops vxcan_link_ops = {
.newlink = vxcan_newlink,
.dellink = vxcan_dellink,
.policy = vxcan_policy,
+ .peer_type = VXCAN_INFO_PEER,
.maxtype = VXCAN_INFO_MAX,
.get_link_net = vxcan_get_link_net,
};
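
The vxcan_xmit() rework replaces can_create_echo_skb() with an explicit clone-and-hand-over, which makes the skb-free semantics visible: consume_skb() records normal disposal of the original, while kfree_skb() records a drop. Condensed to its essentials (same names as in the hunk above):

	skb_tx_timestamp(oskb);			/* timestamp before cloning */

	skb = skb_clone(oskb, GFP_ATOMIC);	/* private copy for the peer */
	if (skb)
		consume_skb(oskb);		/* handed over: not a drop */
	else
		kfree_skb(oskb);		/* allocation failed: a drop */
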
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e2b15d29d15e..43d7f22820b8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1,16 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
*
- * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2012 - 2022 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
- * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This driver is developed for the AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS controllers.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -18,16 +20,18 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/u64_stats_sync.h>
#define DRIVER_NAME "xilinx_can"
@@ -51,10 +55,17 @@ enum xcan_reg {
/* only on CAN FD cores */
XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
- * Prescalar
+ * Prescaler
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+
+ /* only on AXI CAN cores */
+ XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
+ XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
+ XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
+ XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
+
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
@@ -87,6 +98,8 @@ enum xcan_reg {
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
+#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
@@ -100,6 +113,7 @@ enum xcan_reg {
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
@@ -118,6 +132,18 @@ enum xcan_reg {
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
+#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO one bit ECC error */
+#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
+ XCAN_IXR_E1BERX_MASK | \
+ XCAN_IXR_E2BETXOL_MASK | \
+ XCAN_IXR_E1BETXOL_MASK | \
+ XCAN_IXR_E2BETXTL_MASK | \
+ XCAN_IXR_E1BETXTL_MASK)
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
@@ -131,8 +157,14 @@ enum xcan_reg {
#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
+#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
+#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
+#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
@@ -193,6 +225,16 @@ struct xcan_devtype_data {
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
* @devtype: Device type specific constants
+ * @transceiver: Optional pointer to associated CAN transceiver
+ * @rstc: Pointer to reset control
+ * @ecc_enable: ECC enable flag
+ * @syncp: synchronization for ECC error stats
+ * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
+ * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
+ * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
+ * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
+ * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
+ * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
*/
struct xcan_priv {
struct can_priv can;
@@ -210,6 +252,16 @@ struct xcan_priv {
struct clk *bus_clk;
struct clk *can_clk;
struct xcan_devtype_data devtype;
+ struct phy *transceiver;
+ struct reset_control *rstc;
+ bool ecc_enable;
+ struct u64_stats_sync syncp;
+ u64_stats_t ecc_rx_2_bit_errors;
+ u64_stats_t ecc_rx_1_bit_errors;
+ u64_stats_t ecc_txol_2_bit_errors;
+ u64_stats_t ecc_txol_1_bit_errors;
+ u64_stats_t ecc_txtl_2_bit_errors;
+ u64_stats_t ecc_txtl_1_bit_errors;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -239,7 +291,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -259,24 +311,62 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
+/* Transmission Delay Compensation constants for CANFD 1.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 32,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+/* Transmission Delay Compensation constants for CANFD 2.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd2 = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 64,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+enum xcan_stats_type {
+ XCAN_ECC_RX_2_BIT_ERRORS,
+ XCAN_ECC_RX_1_BIT_ERRORS,
+ XCAN_ECC_TXOL_2_BIT_ERRORS,
+ XCAN_ECC_TXOL_1_BIT_ERRORS,
+ XCAN_ECC_TXTL_2_BIT_ERRORS,
+ XCAN_ECC_TXTL_1_BIT_ERRORS,
+};
+
+static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
+ [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
+ [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
+ [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
+ [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
+ [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -391,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -406,7 +496,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
return -EPERM;
}
- /* Setting Baud Rate prescalar value in BRPR Register */
+ /* Setting Baud Rate prescaler value in BRPR Register */
btr0 = (bt->brp - 1);
/* Setting Time Segment 1 in BTR Register */
@@ -423,8 +513,16 @@ static int xcan_set_bittiming(struct net_device *ndev)
if (priv->devtype.cantype == XAXI_CANFD ||
priv->devtype.cantype == XAXI_CANFD_2_0) {
- /* Setting Baud Rate prescalar value in F_BRPR Register */
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
+ if (can_fd_tdc_is_enabled(&priv->can)) {
+ if (priv->devtype.cantype == XAXI_CANFD)
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ else
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ }
/* Setting Time Segment 1 in BTR Register */
btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
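
The FIELD_PREP() calls above pack the transmitter delay compensation offset into the data-phase prescaler register. A worked example with the masks defined earlier (the TDCO value of 10 is illustrative):

	/* XCAN_BRPR_TDCO_MASK = GENMASK(12, 8), so for tdco = 10:
	 *   FIELD_PREP(GENMASK(12, 8), 10) == 10 << 8 == 0x0a00
	 * XCAN_BRPR_TDC_ENABLE = BIT(16) == 0x10000 switches TDC on,
	 * giving btr0 |= 0x0a00 | 0x10000 on a CANFD 1.0 core.
	 */
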
@@ -484,6 +582,9 @@ static int xcan_chip_start(struct net_device *ndev)
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+ if (priv->ecc_enable)
+ ier |= XCAN_IXR_ECC_MASK;
+
if (priv->devtype.flags & XCAN_FLAG_RXMNF)
ier |= XCAN_IXR_RXMNF_MASK;
@@ -589,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
- else
- can_put_echo_skb(skb, ndev, 0, 0);
-
- priv->tx_head++;
-
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is an RTR frame, this write triggers transmission
* (not on CAN FD)
@@ -629,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
+
+ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+ else
+ can_put_echo_skb(skb, ndev, 0, 0);
+
+ priv->tx_head++;
}
/**
@@ -710,7 +811,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
@@ -787,10 +888,11 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
if (cf->len > 4)
*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
- }
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -871,8 +973,11 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
}
}
- stats->rx_bytes += cf->len;
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -929,6 +1034,7 @@ static void xcan_set_error_state(struct net_device *ndev,
can_change_state(ndev, cf, tx_state, rx_state);
if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
@@ -965,13 +1071,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
- if (skb) {
- struct net_device_stats *stats = &ndev->stats;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
}
@@ -1088,6 +1189,54 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
priv->can.can_stats.bus_error++;
}
+ if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
+ u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
+
+ reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
+ reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
+ reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
+
+ /* The counter reaches its maximum at 0xffff and does not overflow.
+ * Accept the small race window between reading and resetting ECC counters.
+ */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+
+ u64_stats_update_begin(&priv->syncp);
+
+ if (isr & XCAN_IXR_E2BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ u64_stats_update_end(&priv->syncp);
+ }
+
if (cf.can_id) {
struct can_frame *skb_cf;
struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
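
The ECC counters above use the u64_stats_sync seqcount pattern: the interrupt handler is the sole writer, and readers retry if they raced with an update. Stripped to its essentials (field names as in the driver; n, start and val are placeholders):

	/* writer side, IRQ context */
	u64_stats_update_begin(&priv->syncp);
	u64_stats_add(&priv->ecc_rx_1_bit_errors, n);
	u64_stats_update_end(&priv->syncp);

	/* reader side, ethtool */
	do {
		start = u64_stats_fetch_begin(&priv->syncp);
		val = u64_stats_read(&priv->ecc_rx_1_bit_errors);
	} while (u64_stats_fetch_retry(&priv->syncp, start));
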
@@ -1095,8 +1244,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (skb) {
skb_cf->can_id |= cf.can_id;
memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
- stats->rx_packets++;
- stats->rx_bytes += CAN_ERR_DLC;
netif_rx(skb);
}
}
@@ -1212,16 +1359,15 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
XCAN_IXR_RXNEMP_MASK);
}
- if (work_done) {
- can_led_event(ndev, CAN_LED_EVENT_RX);
+ if (work_done)
xcan_update_error_state_after_rxtx(ndev);
- }
if (work_done < quota) {
- napi_complete_done(napi, work_done);
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= xcan_rx_int_mask(priv);
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ if (napi_complete_done(napi, work_done)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= xcan_rx_int_mask(priv);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
}
return work_done;
}
@@ -1300,7 +1446,6 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
spin_unlock_irqrestore(&priv->tx_lock, flags);
- can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
@@ -1319,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr_errors, mask;
u32 isr, ier;
- u32 isr_errors;
u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
@@ -1339,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
if (isr & XCAN_IXR_TXOK_MASK)
xcan_tx_interrupt(ndev, isr);
+ mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK;
+
+ if (priv->ecc_enable)
+ mask |= XCAN_IXR_ECC_MASK;
+
/* Check the type of error interrupt and process it */
- isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
- XCAN_IXR_RXMNF_MASK);
+ isr_errors = isr & mask;
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
@@ -1390,6 +1540,10 @@ static int xcan_open(struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
+ ret = phy_power_on(priv->transceiver);
+ if (ret)
+ return ret;
+
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
@@ -1422,7 +1576,6 @@ static int xcan_open(struct net_device *ndev)
goto err_candev;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -1434,6 +1587,7 @@ err_irq:
free_irq(ndev->irq, ndev);
err:
pm_runtime_put(priv->dev);
+ phy_power_off(priv->transceiver);
return ret;
}
@@ -1454,8 +1608,8 @@ static int xcan_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
pm_runtime_put(priv->dev);
+ phy_power_off(priv->transceiver);
return 0;
}
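
priv->transceiver is obtained with devm_phy_optional_get() later in probe, so it may legitimately be NULL; phy_power_on() and phy_power_off() both treat a NULL phy as a successful no-op, which is why xcan_open() and xcan_close() need no NULL checks. The resulting ordering, sketched:

	/* open: power the transceiver before starting the controller */
	ret = phy_power_on(priv->transceiver);	/* NULL phy: returns 0 */
	if (ret)
		return ret;

	/* close and error paths mirror it: transceiver goes off last */
	phy_power_off(priv->transceiver);	/* NULL phy: no-op */
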
@@ -1491,11 +1645,70 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
+/**
+ * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
+ * @ndev: Pointer to net_device structure
+ * @tdcv: Pointer to TDCV value
+ *
+ * Return: 0 on success
+ */
+static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ return 0;
+}
+
+static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &xcan_priv_flags_strings,
+ sizeof(xcan_priv_flags_strings));
+ }
+}
+
+static int xcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xcan_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
+ data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
+ data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
+ data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
+ data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
+ data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
.ndo_start_xmit = xcan_start_xmit,
- .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops xcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_strings = xcan_get_strings,
+ .get_sset_count = xcan_get_sset_count,
+ .get_ethtool_stats = xcan_get_ethtool_stats,
};
/**
@@ -1666,8 +1879,8 @@ static int xcan_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct xcan_priv *priv;
- const struct of_device_id *of_id;
- const struct xcan_devtype_data *devtype = &xcan_axi_data;
+ struct phy *transceiver;
+ const struct xcan_devtype_data *devtype;
void __iomem *addr;
int ret;
int rx_max, tx_max;
@@ -1681,9 +1894,7 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- of_id = of_match_device(xcan_of_match, &pdev->dev);
- if (of_id && of_id->data)
- devtype = of_id->data;
+ devtype = device_get_match_data(&pdev->dev);
hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
"tx-mailbox-count" : "tx-fifo-depth";
@@ -1736,24 +1947,42 @@ static int xcan_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
priv->dev = &pdev->dev;
priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
+ priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&pdev->dev, "Cannot get CAN reset.\n");
+ ret = PTR_ERR(priv->rstc);
+ goto err_free;
+ }
- if (devtype->cantype == XAXI_CANFD)
- priv->can.data_bittiming_const =
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ goto err_free;
+
+ if (devtype->cantype == XAXI_CANFD) {
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd;
+ }
- if (devtype->cantype == XAXI_CANFD_2_0)
- priv->can.data_bittiming_const =
+ if (devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd2;
+ }
if (devtype->cantype == XAXI_CANFD ||
- devtype->cantype == XAXI_CANFD_2_0)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_TDC_AUTO;
+ priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ }
priv->reg_base = addr;
priv->tx_max = tx_max;
@@ -1761,28 +1990,42 @@ static int xcan_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
- ndev->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_reset;
+
+ ndev->irq = ret;
+
ndev->flags |= IFF_ECHO; /* We support local echo */
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &xcan_netdev_ops;
+ ndev->ethtool_ops = &xcan_ethtool_ops;
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(priv->can_clk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
"device clock not found\n");
- goto err_free;
+ goto err_reset;
}
priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
if (IS_ERR(priv->bus_clk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
"bus clock not found\n");
- goto err_free;
+ goto err_reset;
}
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver)) {
+ ret = PTR_ERR(transceiver);
+ dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
+ goto err_reset;
+ }
+ priv->transceiver = transceiver;
+
priv->write_reg = xcan_write_reg_le;
priv->read_reg = xcan_read_reg_le;
@@ -1801,7 +2044,7 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(priv->can_clk);
- netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+ netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
ret = register_candev(ndev);
if (ret) {
@@ -1809,8 +2052,7 @@ static int xcan_probe(struct platform_device *pdev)
goto err_disableclks;
}
- devm_can_led_init(ndev);
-
+ of_can_transceiver(ndev);
pm_runtime_put(&pdev->dev);
if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
@@ -1822,11 +2064,18 @@ static int xcan_probe(struct platform_device *pdev)
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
+ if (priv->ecc_enable) {
+ /* Reset FIFO ECC counters */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+ }
return 0;
err_disableclks:
pm_runtime_put(priv->dev);
pm_runtime_disable(&pdev->dev);
+err_reset:
+ reset_control_assert(priv->rstc);
err_free:
free_candev(ndev);
err:
@@ -1840,20 +2089,20 @@ err:
* This function frees all the resources allocated to the device.
* Return: 0 always
*/
-static int xcan_remove(struct platform_device *pdev)
+static void xcan_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
pm_runtime_disable(&pdev->dev);
+ reset_control_assert(priv->rstc);
free_candev(ndev);
-
- return 0;
}
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
- .remove = xcan_remove,
+ .remove = xcan_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &xcan_dev_pm_ops,
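
The added reset handling follows the usual optional-reset lifecycle: devm_reset_control_get_optional_exclusive() returns NULL rather than an error when no reset line is described, and a NULL control turns the subsequent calls into no-ops. A minimal probe/remove sketch with the names used above:

	priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(priv->rstc))
		return PTR_ERR(priv->rstc);	/* a described reset failed */

	ret = reset_control_reset(priv->rstc);	/* pulse; no-op when NULL */
	if (ret)
		return ret;

	/* on remove and probe-error paths, hold the block in reset */
	reset_control_assert(priv->rstc);
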
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..7eb301fd987d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -18,6 +18,7 @@ config NET_DSA_BCM_SF2
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
+ select NET_DSA_TAG_NONE
select FIXED_PHY
help
This enables support for a fake mock-up switch chip which
@@ -25,20 +26,42 @@ config NET_DSA_LOOP
source "drivers/net/dsa/hirschmann/Kconfig"
-config NET_DSA_LANTIQ_GSWIP
- tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM
- select NET_DSA_TAG_GSWIP
- help
- This enables support for the Lantiq / Intel GSWIP 2.1 found in
- the xrx200 / VR9 SoC.
+source "drivers/net/dsa/lantiq/Kconfig"
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select REGMAP_IRQ
+ imply NET_DSA_MT7530_MDIO
+ imply NET_DSA_MT7530_MMIO
+ help
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. The multi-chip module MT7530 found in the MT7621AT,
+ MT7621DAT, MT7621ST and MT7623AI SoCs, and the built-in switch in
+ the MT7988 SoC, are supported as well.
+
+config NET_DSA_MT7530_MDIO
+ tristate "MediaTek MT7530 MDIO interface driver"
+ depends on NET_DSA_MT7530
+ select MEDIATEK_GE_PHY
+ select PCS_MTK_LYNXI
+ help
+ This enables support for the MediaTek MT7530 and MT7531 switch
+ chips which are connected via MDIO, as well as multi-chip
+ module MT7530 which can be found in the MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs.
+
+config NET_DSA_MT7530_MMIO
+ tristate "MediaTek MT7530 MMIO interface driver"
+ depends on NET_DSA_MT7530
+ depends on HAS_IOMEM
+ imply MEDIATEK_GE_SOC_PHY
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the built-in Ethernet switch found
+ in the MediaTek MT7988 SoC.
+ The switch is a similar design to the MT7531, but the switch
+ registers are directly mapped into the SoC's register space
+ rather than being accessible via MDIO.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
@@ -59,37 +82,38 @@ source "drivers/net/dsa/sja1105/Kconfig"
source "drivers/net/dsa/xrs700x/Kconfig"
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- select NET_DSA_TAG_QCA
- select REGMAP
+source "drivers/net/dsa/realtek/Kconfig"
+
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
help
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
+ This driver supports the A5PSW switch, which is embedded in the
+ Renesas RZ/N1 SoC.
-config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
- select NET_DSA_TAG_RTL8_4
- select FIXED_PHY
- select IRQ_DOMAIN
- select REALTEK_PHY
- select REGMAP
+config NET_DSA_KS8995
+ tristate "Micrel KS8995 family 5-ports 10/100 Ethernet switches"
+ depends on SPI
+ select NET_DSA_TAG_NONE
help
- This enables support for the Realtek SMI-based switch
- chips, currently only RTL8366RB.
+ This driver supports the Micrel KS8995 family of 10/100 Mbit ethernet
+ switches, managed over SPI.
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
+ imply SMSC_PHY
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
depends on I2C
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -97,14 +121,16 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
+ depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
tristate
+ select NET_DSA_TAG_VSC73XX_8021Q
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
@@ -128,4 +154,11 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM
This enables support for the Vitesse VSC7385, VSC7388, VSC7395
and VSC7398 SparX integrated ethernet switches, connected over
a CPU-attached address bus and working in memory-mapped I/O mode.
+
+config NET_DSA_YT921X
+ tristate "Motorcomm YT9215 ethernet switch chip support"
+ select NET_DSA_TAG_YT921X
+ help
+ This enables support for the Motorcomm YT9215 ethernet switch
+ chip.
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8da1569a34e6..16de4ba3fa38 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,26 +2,26 @@
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
-ifdef CONFIG_NET_DSA_LOOP
-obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
-endif
-obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_KS8995) += ks8995.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
+obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
+obj-$(CONFIG_NET_DSA_YT921X) += yt921x.o
obj-y += b53/
obj-y += hirschmann/
+obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
+obj-y += realtek/
obj-y += sja1105/
obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 90b525160b71..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -2,8 +2,10 @@
menuconfig B53
tristate "Broadcom BCM53xx managed switch support"
depends on NET_DSA
+ select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_LEGACY_FCS
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index af4761968733..a1a177713d99 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -21,12 +21,15 @@
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "b53_regs.h"
@@ -224,6 +227,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {
#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx)
+#define B53_MAX_MTU_25 (1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+#define B53_MAX_MTU (9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
unsigned int i;
@@ -322,6 +328,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -335,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+ if (!is5325(dev)) {
+ /* Include IMP port in dumb forwarding mode */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
- * frames should be flooded or not.
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ } else {
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_IP_MC;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -369,15 +400,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
+ vc1 &= ~VC1_RX_MCST_FWD_EN;
+
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -389,7 +422,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -459,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
@@ -483,6 +519,9 @@ out:
static int b53_fast_age_port(struct b53_device *dev, int port)
{
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -490,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
+ if (is5325(dev))
+ return 0;
+
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -501,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
unsigned int i;
u16 pvlan;
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
@@ -518,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
{
u16 uc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+ if (unicast)
+ uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ }
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -531,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
{
u16 mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+ if (multicast)
+ mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ }
}
static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -551,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
{
u16 reg;
+ if (is5325(dev))
+ return;
+
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
@@ -559,6 +632,71 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_port_set_isolated(struct b53_device *dev, int port,
+ bool isolated)
+{
+ u8 offset;
+ u16 reg;
+
+ if (is5325(dev))
+ offset = B53_PROTECTED_PORT_SEL_25;
+ else
+ offset = B53_PROTECTED_PORT_SEL;
+
+ b53_read16(dev, B53_CTRL_PAGE, offset, &reg);
+ if (isolated)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, offset, reg);
+}
+
+static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+
+int b53_setup_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+ b53_port_set_isolated(dev, port, false);
+
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
+ if (is5325(dev) &&
+ in_range(port, 1, 4)) {
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+ reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+ if (dsa_is_unused_port(ds, port))
+ reg |= PD_MODE_POWER_DOWN_PORT(port);
+ else
+ reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_setup_port);
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -571,9 +709,8 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
+ if (dev->ops->phy_enable)
+ dev->ops->phy_enable(dev, port);
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
@@ -613,6 +750,9 @@ void b53_disable_port(struct dsa_switch *ds, int port)
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+ if (dev->ops->phy_disable)
+ dev->ops->phy_disable(dev, port);
+
if (dev->ops->irq_disable)
dev->ops->irq_disable(dev, port);
}
@@ -657,6 +797,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+ /* B53_BRCM_HDR not present on devices with legacy tags */
+ if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+ dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+ return;
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -705,10 +850,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
-
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -720,12 +861,18 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
+static void b53_enable_stp(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc |= GC_RX_BPDU_EN;
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
static u16 b53_default_pvid(struct b53_device *dev)
{
- if (is5325(dev) || is5365(dev))
- return 1;
- else
- return 0;
+ return 0;
}
static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
@@ -735,6 +882,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
+static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (!dev->vlan_filtering)
+ return true;
+
+ dp = dsa_to_port(ds, port);
+
+ if (dsa_port_is_cpu(dp))
+ return true;
+
+ return dp->bridge == NULL;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -753,34 +916,47 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
- * dsa_slave_vlan_rx_add_vid() to create the default VLAN
+ * dsa_user_vlan_rx_add_vid() to create the default VLAN
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
+ v = &dev->vlans[def_vid];
b53_for_each_port(dev, i) {
- v = &dev->vlans[def_vid];
- v->members |= BIT(i);
+ if (!b53_vlan_port_may_join_untagged(ds, i))
+ continue;
+
+ vl.members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
- v->untag = v->members;
- b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ vl.untag = vl.members;
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
+ def_vid);
}
+ b53_set_vlan_entry(dev, def_vid, &vl);
- /* Upon initial call we have not set-up any VLANs, but upon
- * system resume, we need to restore all VLAN entries.
- */
- for (vid = def_vid; vid < dev->num_vlans; vid++) {
- v = &dev->vlans[vid];
+ if (dev->vlan_filtering) {
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
- if (!v->members)
- continue;
+ if (!v->members)
+ continue;
- b53_set_vlan_entry(dev, vid, v);
- b53_fast_age_vlan(dev, vid);
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_for_each_port(dev, i) {
+ if (!dsa_is_cpu_port(ds, i))
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i),
+ dev->ports[i].pvid);
+ }
}
return 0;
@@ -859,6 +1035,7 @@ static int b53_switch_reset(struct b53_device *dev)
}
b53_enable_mib(dev);
+ b53_enable_stp(dev);
return b53_flush_arl(dev, FAST_AGE_STATIC);
}
@@ -958,7 +1135,7 @@ static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
return NULL;
}
- return mdiobus_get_phy(ds->slave_mii_bus, port);
+ return mdiobus_get_phy(ds->user_mii_bus, port);
}
void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
@@ -972,8 +1149,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
- mibs[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, mibs[i].name);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
if (!phydev)
@@ -1099,7 +1275,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
unsigned int port;
+ u16 pvid;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
@@ -1107,12 +1285,36 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ /* The switch does not tell us the original VLAN for untagged
+ * packets, so keep the CPU port always tagged.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID) {
+ /* BCM53101 uses 0.5 second increments */
+ ds->ageing_time_min = 1 * 500;
+ ds->ageing_time_max = AGE_TIME_MAX * 500;
+ } else {
+ /* Everything else uses 1 second increments */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ }
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
+ /* setup default vlan for filtering mode */
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+ b53_for_each_port(dev, port) {
+ vl->members |= BIT(port);
+ if (!b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ }
+
b53_reset_mib(dev);
ret = b53_apply_config(dev);
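
Because the BCM53101 ages entries in 0.5 s ticks while the other chips use 1 s ticks, the same wall-clock ageing time maps to different hardware values; ds->ageing_time_min/max are expressed in milliseconds, hence the * 500 and * 1000 scaling of AGE_TIME_MAX above. A quick worked example:

	/* A requested ageing time of 300 s (300000 ms) becomes:
	 *   BCM53101:     300000 / 500  = 600 hardware ticks
	 *   other chips:  300000 / 1000 = 300 hardware ticks
	 */
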
@@ -1147,6 +1349,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1171,6 +1375,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1183,6 +1389,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+ reg &= ~(0x3 << GMII_PO_SPEED_S);
+ if (is5301x(dev) || is58xx(dev))
+ reg &= ~PORT_OVERRIDE_SPEED_2000M;
+
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@@ -1201,100 +1411,114 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
- if (rx_pause)
- reg |= PORT_OVERRIDE_RX_FLOW;
- if (tx_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
+ if (is5325(dev))
+ reg &= ~PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
+
+ if (rx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ if (tx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ }
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
-static void b53_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u8 rgmii_ctrl = 0, reg = 0, off;
- bool tx_pause = false;
- bool rx_pause = false;
+ u8 rgmii_ctrl = 0;
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
+ b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- /* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && dsa_is_cpu_port(ds, port))
- tx_pause = rx_pause = true;
+ if (is6318_268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
- if (phydev->pause) {
- if (phydev->asym_pause)
- tx_pause = true;
- rx_pause = true;
- }
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
- tx_pause, rx_pause);
- b53_force_link(dev, port, phydev->link);
+ b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
- if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+ dev_dbg(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
- /* Configure the port RGMII clock delay by DLL disabled and
- * tx_clk aligned timing (restoring to reset defaults)
- */
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
-
- /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
- * sure that we enable the port TX clock internal delay to
- * account for this internal delay that is inserted, otherwise
- * the switch won't be able to receive correctly.
- *
- * PHY_INTERFACE_MODE_RGMII means that we are not introducing
- * any delay neither on transmission nor reception, so the
- * BCM53125 must also be configured accordingly to account for
- * the lack of delay and introduce
- *
- * The BCM53125 switch has its RX clock and TX clock control
- * swapped, hence the reason why we modify the TX clock path in
- * the "RGMII" case
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct b53_device *dev = ds->priv;
+ u8 rgmii_ctrl = 0, off;
+
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+ /* Configure the port RGMII clock delay with the DLLs disabled and
+ * tx_clk aligned timing (restoring the reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay; make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay on either transmission or reception, so the
+ * BCM53125 must also be configured accordingly to account for
+ * the lack of delay and introduce one itself.
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+
+ if (dev->chip_id != BCM53115_DEVICE_ID)
rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
- dev_info(ds->dev, "Configured port %d for %s\n", port,
- phy_modes(phydev->interface));
- }
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
- /* configure MII port if necessary */
- if (is5325(dev)) {
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
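
The two if () tests above implement a small mapping from phy-mode to DLL bits. A minimal sketch of that mapping, with b53_rgmii_dll_bits() as a hypothetical helper name (not part of the patch):

static u8 b53_rgmii_dll_bits(phy_interface_t interface)
{
	/* Enable the delay that the PHY side does not insert; the
	 * BCM53125 swaps its RX and TX clock controls, hence TXC for
	 * plain "rgmii".
	 */
	switch (interface) {
	case PHY_INTERFACE_MODE_RGMII:
		return RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return RGMII_CTRL_DLL_TXC;
	default:
		/* rgmii-id and rgmii-rxid leave both DLL bits clear */
		return 0;
	}
}
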
+
+static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ u8 reg = 0;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
- /* reverse mii needs to be enabled */
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- reg | PORT_OVERRIDE_RV_MII_25);
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- &reg);
-
- if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- dev_err(ds->dev,
- "Failed to enable reverse MII mode\n");
- return;
- }
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
}
}
-
- /* Re-negotiate EEE if it was enabled already */
- p->eee_enabled = b53_eee_init(ds, port, phydev);
}
void b53_port_event(struct dsa_switch *ds, int port)
@@ -1309,95 +1533,97 @@ void b53_port_event(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_port_event);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (dev->ops->serdes_phylink_validate)
- dev->ops->serdes_phylink_validate(dev, port, mask, state);
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
- * support Gigabit, including Half duplex.
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
*/
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !(is5325(dev) || is5365(dev))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
- if (!phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ /* BCM63xx RGMII ports support RGMII */
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ phy_interface_set_rgmii(config->supported_interfaces);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
- phylink_helper_basex_speed(state);
+ /* 5325/5365 are not capable of gigabit speeds; everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so they will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
+ */
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
+
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
}
-EXPORT_SYMBOL(b53_phylink_validate);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
- int ret = -EOPNOTSUPP;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_link_state)
- ret = dev->ops->serdes_link_state(dev, port, state);
+ if (!dev->ops->phylink_mac_select_pcs)
+ return NULL;
- return ret;
+ return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_state);
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void b53_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ phy_interface_t interface = state->interface;
+ struct dsa_switch *ds = dp->ds;
struct b53_device *dev = ds->priv;
+ int port = dp->index;
- if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
- return;
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_adjust_63xx_rgmii(ds, port, interface);
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_config)
- dev->ops->serdes_config(dev, port, mode, state);
-}
-EXPORT_SYMBOL(b53_phylink_mac_config);
-
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct b53_device *dev = ds->priv;
+ if (mode == MLO_AN_FIXED) {
+ if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
+ b53_adjust_531x5_rgmii(ds, port, interface);
- if (dev->ops->serdes_an_restart)
- dev->ops->serdes_an_restart(dev, port);
+ /* configure MII port if necessary */
+ if (is5325(dev))
+ b53_adjust_5325_mii(ds, port);
+ }
}
-EXPORT_SYMBOL(b53_phylink_mac_an_restart);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
+static void b53_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
+ int port = dp->index;
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_force_link(dev, port, false);
return;
+ }
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@@ -1408,21 +1634,38 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_down);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
+static void b53_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
struct b53_device *dev = ds->priv;
+ struct ethtool_keee *p = &dev->ports[dp->index].eee;
+ int port = dp->index;
+
+ if (mode == MLO_AN_PHY) {
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ }
- if (mode == MLO_AN_PHY)
return;
+ }
if (mode == MLO_AN_FIXED) {
+ /* Force flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
+ tx_pause = rx_pause = true;
+
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
@@ -1433,14 +1676,16 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+ if (dev->vlan_filtering != vlan_filtering) {
+ dev->vlan_filtering = vlan_filtering;
+ b53_apply_config(dev);
+ }
return 0;
}
@@ -1451,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
- if ((is5325(dev) || is5365(dev)) && vlan->vid == 0)
- return -EOPNOTSUPP;
-
/* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of
* receiving VLAN tagged frames at all, but we can still allow the port to
* be configured for egress untagged.
@@ -1465,7 +1707,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
return 0;
}
@@ -1478,18 +1720,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
+ u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
- vl = &dev->vlans[vlan->vid];
+ if (vlan->vid == 0)
+ return 0;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
+ dev->ports[port].pvid = new_pvid;
+
+ vl = &dev->vlans[vlan->vid];
- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
- untagged = true;
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
@@ -1497,13 +1750,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
else
vl->untag &= ~BIT(port);
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
- if (pvid && !dsa_is_cpu_port(ds, port)) {
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid);
- b53_fast_age_vlan(dev, vlan->vid);
+ new_pvid);
+ b53_fast_age_vlan(dev, old_pvid);
}
return 0;
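
The pvid bookkeeping above reduces to three cases. A sketch, with b53_new_pvid() as a hypothetical helper name:

static u16 b53_new_pvid(struct b53_device *dev, u16 old_pvid, u16 vid,
			bool pvid)
{
	if (pvid)			/* this VLAN becomes the pvid */
		return vid;
	if (vid == old_pvid)		/* pvid flag dropped: fall back */
		return b53_default_pvid(dev);
	return old_pvid;		/* unrelated VLAN: pvid unchanged */
}
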
@@ -1518,20 +1774,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_vlan *vl;
u16 pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ if (vlan->vid == 0)
+ return 0;
- vl = &dev->vlans[vlan->vid];
+ pvid = dev->ports[port].pvid;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ vl = &dev->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
+ dev->ports[port].pvid = pvid;
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
@@ -1583,7 +1844,82 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
return b53_arl_op_wait(dev);
}
-static int b53_arl_read(struct b53_device *dev, u64 mac,
+static void b53_arl_read_entry_25(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx),
+ &vid_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid, vid_entry);
+}
+
+static void b53_arl_write_entry_25(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u8 vid_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_25(&mac_vid, &vid_entry, ent);
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+}
+
+static void b53_arl_read_entry_89(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u64 mac_vid;
+ u16 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_89(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write16(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+}
+
+static void b53_arl_read_entry_95(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_write_entry_95(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ u32 fwd_entry;
+ u64 mac_vid;
+
+ b53_arl_from_entry(&mac_vid, &fwd_entry, ent);
+ b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx),
+ mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx),
+ fwd_entry);
+}
+
+static int b53_arl_read(struct b53_device *dev, const u8 *mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
@@ -1598,43 +1934,30 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
/* Read the bins */
for (i = 0; i < dev->num_arl_bins; i++) {
- u64 mac_vid;
- u32 fwd_entry;
-
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
- b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ b53_arl_read_entry(dev, ent, i);
- if (!(fwd_entry & ARLTBL_VALID)) {
+ if (!ent->is_valid) {
set_bit(i, free_bins);
continue;
}
- if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ if (!ether_addr_equal(ent->mac, mac))
continue;
- if (dev->vlan_enabled &&
- ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+ if (dev->vlan_enabled && ent->vid != vid)
continue;
*idx = i;
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
- return -ENOSPC;
-
*idx = find_first_bit(free_bins, dev->num_arl_bins);
-
- return -ENOENT;
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
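
The rewritten lookup now encodes three outcomes in one return value. A sketch of the caller-side contract (compare b53_arl_op() below; this fragment only illustrates the convention):

ret = b53_arl_read(dev, addr, vid, &ent, &idx);
/* ret == 0:       match found, idx is the matching bin
 * ret == -ENOENT: no match, idx is the first free bin to install into
 * ret == -ENOSPC: no match and every bin is occupied
 */
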
static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
struct b53_arl_entry ent;
- u32 fwd_entry;
- u64 mac, mac_vid = 0;
u8 idx = 0;
+ u64 mac;
int ret;
/* Convert the array into a 64-bit MAC */
@@ -1642,14 +1965,19 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
- b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ if (!is5325m(dev)) {
+ if (is5325(dev) || is5365(dev))
+ b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ else
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ }
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+ ret = b53_arl_read(dev, addr, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
@@ -1666,7 +1994,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
- fwd_entry = 0;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
@@ -1693,28 +2020,18 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
- b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
-
- b53_write64(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
- b53_write32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+ b53_arl_write_entry(dev, &ent, idx);
return b53_arl_rw_op(dev, 0);
}
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
- /* 5325 and 5365 require some more massaging, but could
- * be supported eventually
- */
- if (is5325(priv) || is5365(priv))
- return -EOPNOTSUPP;
-
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
@@ -1724,7 +2041,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1737,15 +2055,55 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_del);
+static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev)) {
+ u16 val16;
+
+ b53_read16(dev, B53_ARLIO_PAGE, offset, &val16);
+ *val = val16 & 0xff;
+ } else {
+ b53_read8(dev, B53_ARLIO_PAGE, offset, val);
+ }
+}
+
+static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val)
+{
+ u8 offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) ||
+ is63xx(dev))
+ offset = B53_ARL_SRCH_CTL_89;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
+ if (is63xx(dev))
+ b53_write16(dev, B53_ARLIO_PAGE, offset, val);
+ else
+ b53_write8(dev, B53_ARLIO_PAGE, offset, val);
+}
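
Both helpers pick the register from the same per-family table, summarized here for reference:

/* 5325/5365                   -> B53_ARL_SRCH_CTL_25 (8-bit access)
 * 5389, 5397/5398, all 63xx   -> B53_ARL_SRCH_CTL_89 (63xx: 16-bit)
 * everything else             -> B53_ARL_SRCH_CTL    (8-bit access)
 */
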
+
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
u8 reg;
do {
- b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ b53_read_arl_srch_ctl(dev, &reg);
if (!(reg & ARL_SRCH_STDN))
- return 0;
+ return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@@ -1756,16 +2114,52 @@ static int b53_arl_search_wait(struct b53_device *dev)
return -ETIMEDOUT;
}
-static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
- struct b53_arl_entry *ent)
+static void b53_arl_search_read_25(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u8 ext;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+ &mac_vid);
+ b53_arl_search_to_entry_25(ent, mac_vid, ext);
+}
+
+static void b53_arl_search_read_89(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
{
+ u16 fwd_entry;
u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry);
+ b53_arl_to_entry_89(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u16 fwd_entry;
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX,
+ &mac_vid);
+ b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry);
+ b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry);
+}
+
+static void b53_arl_search_read_95(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
u32 fwd_entry;
+ u64 mac_vid;
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+ &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
}
@@ -1784,30 +2178,31 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
- unsigned int count = 0;
int ret;
- u8 reg;
+
+ if (priv->num_arl_bins > 2)
+ results_per_hit = 2;
mutex_lock(&priv->arl_mutex);
/* Start search operation */
- reg = ARL_SRCH_STDN;
- b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+ b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN);
do {
ret = b53_arl_search_wait(priv);
if (ret)
break;
- b53_arl_search_rd(priv, 0, &results[0]);
+ b53_arl_search_read(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
break;
- if (priv->num_arl_bins > 2) {
- b53_arl_search_rd(priv, 1, &results[1]);
+ if (results_per_hit == 2) {
+ b53_arl_search_read(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
break;
@@ -1816,7 +2211,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < b53_max_arl_entries(priv) / 2);
+ } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);
@@ -1825,7 +2220,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1845,7 +2241,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1860,11 +2257,13 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mdb_del);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- u16 pvlan, reg;
+ u16 pvlan, reg, pvid;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
@@ -1873,21 +2272,32 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Take this port out of the "join all VLANs" mode since we will
+ * have proper VLAN entries from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ reg);
+ }
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ b53_set_vlan_entry(dev, pvid, vl);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this local port to the remote port VLAN control
@@ -1901,6 +2311,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -1911,10 +2324,10 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
}
EXPORT_SYMBOL(b53_br_join);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
- struct b53_vlan *vl = &dev->vlans[0];
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1923,7 +2336,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -1936,22 +2349,27 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port rejoin all VLANs, which needs no per-VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
- /* Make this port join all VLANs without VLAN entries */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
- } else {
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
@@ -2004,7 +2422,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ struct b53_device *dev = ds->priv;
+ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED);
+
+ if (!is5325(dev))
+ mask |= BR_LEARNING;
+
+ if (flags.mask & ~mask)
return -EINVAL;
return 0;
@@ -2024,6 +2448,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
if (flags.mask & BR_LEARNING)
b53_port_set_learning(ds->priv, port,
!!(flags.val & BR_LEARNING));
+ if (flags.mask & BR_ISOLATED)
+ b53_port_set_isolated(ds->priv, port,
+ !!(flags.val & BR_ISOLATED));
return 0;
}
@@ -2080,8 +2507,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
goto out;
}
- /* Older models require a different 6 byte tag */
- if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ /* Older models require different 6 byte tags */
+ if (is5325(dev) || is5365(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+ goto out;
+ } else if (is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
@@ -2101,7 +2531,8 @@ out:
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
@@ -2164,28 +2595,16 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct b53_device *dev = ds->priv;
- u16 reg;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
- if (enable)
- reg |= BIT(port);
- else
- reg &= ~BIT(port);
- b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
-}
-EXPORT_SYMBOL(b53_eee_enable_set);
-
-
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
- ret = phy_init_eee(phy, 0);
+ if (!b53_support_eee(ds, port))
+ return 0;
+
+ ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2195,30 +2614,18 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u16 reg;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & BIT(port));
-
- return 0;
+ return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
-
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
+ struct ethtool_keee *p = &dev->ports[port].eee;
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2234,18 +2641,58 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
bool allow_10_100;
if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
+ return 0;
+
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
- enable_jumbo = (mtu >= JMS_MIN_SIZE);
- allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+ enable_jumbo = (mtu > ETH_DATA_LEN);
+ allow_10_100 = !is63xx(dev);
return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}
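
Jumbo mode now keys off the standard Ethernet payload size instead of JMS_MIN_SIZE. A self-contained user-space sketch of the new threshold (ETH_DATA_LEN redefined here for illustration):

#include <stdio.h>

#define ETH_DATA_LEN 1500	/* as in <uapi/linux/if_ether.h> */

int main(void)
{
	int mtus[] = { 1500, 1518, 9000 };

	for (int i = 0; i < 3; i++)
		printf("mtu %4d -> jumbo %s\n", mtus[i],
		       mtus[i] > ETH_DATA_LEN ? "on" : "off");
	return 0;	/* prints: off, on, on */
}
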
static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
- return JMS_MAX_SIZE;
+ struct b53_device *dev = ds->priv;
+
+ if (is5325(dev) || is5365(dev))
+ return B53_MAX_MTU_25;
+
+ return B53_MAX_MTU;
+}
+
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct b53_device *dev = ds->priv;
+ u32 atc;
+ int reg;
+
+ if (is63xx(dev))
+ reg = B53_AGING_TIME_CONTROL_63XX;
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+ if (dev->chip_id == BCM53101_DEVICE_ID)
+ atc = DIV_ROUND_CLOSEST(msecs, 500);
+ else
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+
+ b53_write32(dev, B53_MGMT_PAGE, reg, atc);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(b53_set_ageing_time);
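
A user-space sketch of the tick conversion, with DIV_ROUND_CLOSEST redefined for illustration (the kernel provides it in <linux/math.h>):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int msecs = 300000;	/* 300 s, the bridge default */

	/* BCM53101 counts in 0.5 s ticks, everything else in 1 s ticks */
	printf("BCM53101: atc = %u\n", DIV_ROUND_CLOSEST(msecs, 500));	/* 600 */
	printf("others:   atc = %u\n", DIV_ROUND_CLOSEST(msecs, 1000));	/* 300 */
	return 0;
}
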
+
+static const struct phylink_mac_ops b53_phylink_mac_ops = {
+ .mac_select_pcs = b53_phylink_mac_select_pcs,
+ .mac_config = b53_phylink_mac_config,
+ .mac_link_down = b53_phylink_mac_link_down,
+ .mac_link_up = b53_phylink_mac_link_up,
+};
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
@@ -2257,17 +2704,13 @@ static const struct dsa_switch_ops b53_switch_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
- .adjust_link = b53_adjust_link,
- .phylink_validate = b53_phylink_validate,
- .phylink_mac_link_state = b53_phylink_mac_link_state,
- .phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_an_restart = b53_phylink_mac_an_restart,
- .phylink_mac_link_down = b53_phylink_mac_link_down,
- .phylink_mac_link_up = b53_phylink_mac_link_up,
+ .phylink_get_caps = b53_phylink_get_caps,
+ .port_setup = b53_setup_port,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
@@ -2288,6 +2731,30 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_change_mtu = b53_change_mtu,
};
+static const struct b53_arl_ops b53_arl_ops_25 = {
+ .arl_read_entry = b53_arl_read_entry_25,
+ .arl_write_entry = b53_arl_write_entry_25,
+ .arl_search_read = b53_arl_search_read_25,
+};
+
+static const struct b53_arl_ops b53_arl_ops_89 = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_89,
+};
+
+static const struct b53_arl_ops b53_arl_ops_63xx = {
+ .arl_read_entry = b53_arl_read_entry_89,
+ .arl_write_entry = b53_arl_write_entry_89,
+ .arl_search_read = b53_arl_search_read_63xx,
+};
+
+static const struct b53_arl_ops b53_arl_ops_95 = {
+ .arl_read_entry = b53_arl_read_entry_95,
+ .arl_write_entry = b53_arl_write_entry_95,
+ .arl_search_read = b53_arl_search_read_95,
+};
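
The b53_arl_read_entry(), b53_arl_write_entry() and b53_arl_search_read() calls used in the hunks above presumably dispatch through these tables via thin wrappers that this diff does not show. A sketch of the assumed shape:

static inline void b53_arl_read_entry(struct b53_device *dev,
				      struct b53_arl_entry *ent, u8 idx)
{
	dev->arl_ops->arl_read_entry(dev, ent, idx);
}
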
+
struct b53_chip_data {
u32 chip_id;
const char *dev_name;
@@ -2301,6 +2768,7 @@ struct b53_chip_data {
u8 duplex_reg;
u8 jumbo_pm_reg;
u8 jumbo_size_reg;
+ const struct b53_arl_ops *arl_ops;
};
#define B53_VTA_REGS \
@@ -2320,6 +2788,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5365_DEVICE_ID,
@@ -2330,6 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_buckets = 1024,
.imp_port = 5,
.duplex_reg = B53_DUPLEX_STAT_FE,
+ .arl_ops = &b53_arl_ops_25,
},
{
.chip_id = BCM5389_DEVICE_ID,
@@ -2343,6 +2813,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5395_DEVICE_ID,
@@ -2356,6 +2827,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM5397_DEVICE_ID,
@@ -2369,6 +2841,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
},
{
.chip_id = BCM5398_DEVICE_ID,
@@ -2382,6 +2855,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_89,
+ },
+ {
+ .chip_id = BCM53101_DEVICE_ID,
+ .dev_name = "BCM53101",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 512,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53115_DEVICE_ID,
@@ -2395,6 +2883,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53125_DEVICE_ID,
@@ -2408,6 +2897,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53128_DEVICE_ID,
@@ -2421,19 +2911,21 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM63XX_DEVICE_ID,
.dev_name = "BCM63xx",
.vlans = 4096,
.enabled_ports = 0, /* pdata must provide them */
- .arl_bins = 4,
- .arl_buckets = 1024,
+ .arl_bins = 1,
+ .arl_buckets = 4096,
.imp_port = 8,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ .arl_ops = &b53_arl_ops_63xx,
},
{
.chip_id = BCM53010_DEVICE_ID,
@@ -2447,6 +2939,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53011_DEVICE_ID,
@@ -2460,6 +2953,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53012_DEVICE_ID,
@@ -2473,6 +2967,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53018_DEVICE_ID,
@@ -2486,6 +2981,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM53019_DEVICE_ID,
@@ -2499,6 +2995,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM58XX_DEVICE_ID,
@@ -2512,6 +3009,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM583XX_DEVICE_ID,
@@ -2525,6 +3023,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
/* Starfighter 2 */
{
@@ -2539,6 +3038,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7445_DEVICE_ID,
@@ -2552,6 +3052,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
{
.chip_id = BCM7278_DEVICE_ID,
@@ -2565,18 +3066,38 @@ static const struct b53_chip_data b53_switch_chips[] = {
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
+ },
+ {
+ .chip_id = BCM53134_DEVICE_ID,
+ .dev_name = "BCM53134",
+ .vlans = 4096,
+ .enabled_ports = 0x12f,
+ .imp_port = 8,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ .arl_ops = &b53_arl_ops_95,
},
};
static int b53_switch_init(struct b53_device *dev)
{
+ u32 chip_id = dev->chip_id;
unsigned int i;
int ret;
+ if (is63xx(dev))
+ chip_id = BCM63XX_DEVICE_ID;
+
for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
const struct b53_chip_data *chip = &b53_switch_chips[i];
- if (chip->chip_id == dev->chip_id) {
+ if (chip->chip_id == chip_id) {
if (!dev->enabled_ports)
dev->enabled_ports = chip->enabled_ports;
dev->name = chip->dev_name;
@@ -2589,6 +3110,7 @@ static int b53_switch_init(struct b53_device *dev)
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
+ dev->arl_ops = chip->arl_ops;
break;
}
}
@@ -2619,6 +3141,9 @@ static int b53_switch_init(struct b53_device *dev)
}
}
+ if (is5325e(dev))
+ dev->num_arl_buckets = 512;
+
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2679,7 +3204,9 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
+ dev->vlan_filtering = false;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would be breaking filtering semantics for any of the other bridge
@@ -2718,10 +3245,24 @@ int b53_switch_detect(struct b53_device *dev)
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
- if (tmp == 0xf)
+ if (tmp == 0xf) {
+ u32 phy_id;
+ int val;
+
dev->chip_id = BCM5325_DEVICE_ID;
- else
+
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+ phy_id = (val & 0xffff) << 16;
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+ phy_id |= (val & 0xfff0);
+
+ if (phy_id == 0x00406330)
+ dev->variant_id = B53_VARIANT_5325M;
+ else if (phy_id == 0x0143bc30)
+ dev->variant_id = B53_VARIANT_5325E;
+ } else {
dev->chip_id = BCM5365_DEVICE_ID;
+ }
break;
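/* Summarizing the hunk above: the 32-bit ID tested is assembled from
 * the port 0 PHY as
 *   phy_id = (MII_PHYSID1 << 16) | (MII_PHYSID2 & 0xfff0)
 * i.e. with the revision nibble masked off, so that 0x00406330
 * selects the BCM5325M variant and 0x0143bc30 the BCM5325E.
 */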
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
@@ -2735,6 +3276,7 @@ int b53_switch_detect(struct b53_device *dev)
return ret;
switch (id32) {
+ case BCM53101_DEVICE_ID:
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
@@ -2743,6 +3285,7 @@ int b53_switch_detect(struct b53_device *dev)
case BCM53012_DEVICE_ID:
case BCM53018_DEVICE_ID:
case BCM53019_DEVICE_ID:
+ case BCM53134_DEVICE_ID:
dev->chip_id = id32;
break;
default:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..43a3b37b731b 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/delay.h>
#include <linux/brcmphy.h>
#include <linux/rtnetlink.h>
@@ -286,6 +287,7 @@ static const struct b53_io_ops b53_mdio_ops = {
#define B53_BRCM_OUI_2 0x03625c00
#define B53_BRCM_OUI_3 0x00406000
#define B53_BRCM_OUI_4 0x01410c00
+#define B53_BRCM_OUI_5 0xae025000
static int b53_mdio_probe(struct mdio_device *mdiodev)
{
@@ -313,7 +315,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
- (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_4 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_5) {
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
return -ENODEV;
}
@@ -326,7 +329,7 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
* layer setup
*/
if (of_machine_is_compatible("brcm,bcm7445d0") &&
- strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ strcmp(mdiodev->bus->name, "sf2 user mii"))
return -EPROBE_DEFER;
dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
@@ -340,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
- if (ret) {
- dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&mdiodev->dev, ret,
+ "failed to register switch\n");
return ret;
}
@@ -356,8 +358,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
@@ -374,9 +374,11 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53101" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm53134" },
{ .compatible = "brcm,bcm5365" },
{ .compatible = "brcm,bcm5389" },
{ .compatible = "brcm,bcm5395" },
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..f4a59d8fbdd6 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -19,14 +19,62 @@
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
+#include <linux/regmap.h>
#include "b53_priv.h"
+#define BCM63XX_EPHY_REG 0x3C
+#define BCM63268_GPHY_REG 0x54
+
+#define GPHY_CTRL_LOW_PWR BIT(3)
+#define GPHY_CTRL_IDDQ_BIAS BIT(0)
+
+struct b53_phy_info {
+ u32 gphy_port_mask;
+ u32 ephy_enable_mask;
+ u32 ephy_port_mask;
+ u32 ephy_bias_bit;
+ const u32 *ephy_offset;
+};
+
struct b53_mmap_priv {
void __iomem *regs;
+ struct regmap *gpio_ctrl;
+ const struct b53_phy_info *phy_info;
+ u32 phys_enabled;
+};
+
+static const u32 bcm6318_ephy_offsets[] = {4, 5, 6, 7};
+
+static const struct b53_phy_info bcm6318_ephy_info = {
+ .ephy_enable_mask = BIT(0) | BIT(4) | BIT(8) | BIT(12) | BIT(16),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6318_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm6318_ephy_offsets,
+};
+
+static const u32 bcm6368_ephy_offsets[] = {2, 3, 4, 5};
+
+static const struct b53_phy_info bcm6368_ephy_info = {
+ .ephy_enable_mask = BIT(0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm6368_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 0,
+ .ephy_offset = bcm6368_ephy_offsets,
+};
+
+static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
+
+static const struct b53_phy_info bcm63268_ephy_info = {
+ .gphy_port_mask = BIT(3),
+ .ephy_enable_mask = GENMASK(4, 0),
+ .ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
+ .ephy_bias_bit = 24,
+ .ephy_offset = bcm63268_ephy_offsets,
};
static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
@@ -216,6 +264,83 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ return -EIO;
+}
+
+static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ const struct b53_phy_info *info = priv->phy_info;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask, val;
+
+ if (enable) {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port]) |
+        BIT(info->ephy_bias_bit);
+ val = 0;
+ } else {
+ mask = (info->ephy_enable_mask << info->ephy_offset[port]);
+ if (!((priv->phys_enabled & ~BIT(port)) & info->ephy_port_mask))
+ mask |= BIT(info->ephy_bias_bit);
+ val = mask;
+ }
+ return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
+}
+
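
A worked example of the register mask computed by bcm63xx_ephy_set() for BCM6318 port 0, using the bcm6318_ephy_info values above (a self-contained sketch):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint32_t enable_mask = BIT(0) | BIT(4) | BIT(8) | BIT(12) | BIT(16);
	uint32_t offset = 4;		/* bcm6318_ephy_offsets[0] */
	uint32_t bias = BIT(24);	/* ephy_bias_bit */

	/* enable: clear the shifted enable bits plus the bias bit */
	printf("mask = 0x%08x, val = 0\n",
	       (unsigned int)((enable_mask << offset) | bias));
	return 0;	/* mask = 0x01111110 */
}
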
+static int bcm63268_gphy_set(struct b53_device *dev, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask = GPHY_CTRL_IDDQ_BIAS | GPHY_CTRL_LOW_PWR;
+ u32 val = 0;
+
+ if (!enable)
+ val = mask;
+
+ return regmap_update_bits(gpio_ctrl, BCM63268_GPHY_REG, mask, val);
+}
+
+static void b53_mmap_phy_enable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, true);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, true);
+ }
+
+ if (!ret)
+ priv->phys_enabled |= BIT(port);
+}
+
+static void b53_mmap_phy_disable(struct b53_device *dev, int port)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ int ret = 0;
+
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, false);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, false);
+ }
+
+ if (!ret)
+ priv->phys_enabled &= ~BIT(port);
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -227,6 +352,10 @@ static const struct b53_io_ops b53_mmap_ops = {
.write32 = b53_mmap_write32,
.write48 = b53_mmap_write48,
.write64 = b53_mmap_write64,
+ .phy_read16 = b53_mmap_phy_read16,
+ .phy_write16 = b53_mmap_phy_write16,
+ .phy_enable = b53_mmap_phy_enable,
+ .phy_disable = b53_mmap_phy_disable,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
@@ -248,7 +377,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
return -ENOMEM;
pdata->regs = mem;
- pdata->chip_id = BCM63XX_DEVICE_ID;
+ pdata->chip_id = (u32)(unsigned long)device_get_match_data(dev);
pdata->big_endian = of_property_read_bool(np, "big-endian");
of_ports = of_get_child_by_name(np, "ports");
@@ -263,7 +392,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
if (of_property_read_u32(of_port, "reg", &reg))
continue;
- if (reg < B53_CPU_PORT)
+ if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}
@@ -298,6 +427,18 @@ static int b53_mmap_probe(struct platform_device *pdev)
priv->regs = pdata->regs;
+ priv->gpio_ctrl = syscon_regmap_lookup_by_phandle(np, "brcm,gpio-ctrl");
+ if (!IS_ERR(priv->gpio_ctrl)) {
+ if (pdata->chip_id == BCM6318_DEVICE_ID ||
+ pdata->chip_id == BCM6328_DEVICE_ID ||
+ pdata->chip_id == BCM6362_DEVICE_ID)
+ priv->phy_info = &bcm6318_ephy_info;
+ else if (pdata->chip_id == BCM6368_DEVICE_ID)
+ priv->phy_info = &bcm6368_ephy_info;
+ else if (pdata->chip_id == BCM63268_DEVICE_ID)
+ priv->phy_info = &bcm63268_ephy_info;
+ }
+
dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv);
if (!dev)
return -ENOMEM;
@@ -309,16 +450,12 @@ static int b53_mmap_probe(struct platform_device *pdev)
return b53_switch_register(dev);
}
-static int b53_mmap_remove(struct platform_device *pdev)
+static void b53_mmap_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (dev)
b53_switch_remove(dev);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void b53_mmap_shutdown(struct platform_device *pdev)
@@ -332,11 +469,28 @@ static void b53_mmap_shutdown(struct platform_device *pdev)
}
static const struct of_device_id b53_mmap_of_table[] = {
- { .compatible = "brcm,bcm3384-switch" },
- { .compatible = "brcm,bcm6328-switch" },
- { .compatible = "brcm,bcm6368-switch" },
- { .compatible = "brcm,bcm63xx-switch" },
- { /* sentinel */ },
+ {
+ .compatible = "brcm,bcm3384-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6318-switch",
+ .data = (void *)BCM6318_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6328-switch",
+ .data = (void *)BCM6328_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6362-switch",
+ .data = (void *)BCM6362_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6368-switch",
+ .data = (void *)BCM6368_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm63268-switch",
+ .data = (void *)BCM63268_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm63xx-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 579da74ada64..bd6849e5bb93 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -21,7 +21,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>
@@ -29,7 +29,6 @@
struct b53_device;
struct net_device;
-struct phylink_link_state;
struct b53_io_ops {
int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -46,19 +45,28 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phy_enable)(struct b53_device *dev, int port);
+ void (*phy_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
- int (*serdes_link_state)(struct b53_device *dev, int port,
- struct phylink_link_state *state);
- void (*serdes_config)(struct b53_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- void (*serdes_an_restart)(struct b53_device *dev, int port);
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
- void (*serdes_phylink_validate)(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+};
+
+struct b53_arl_entry;
+
+struct b53_arl_ops {
+ void (*arl_read_entry)(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx);
+ void (*arl_write_entry)(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx);
+ void (*arl_search_read)(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent);
};
#define B53_INVALID_LANE 0xff
@@ -71,10 +79,16 @@ enum {
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
+ BCM53101_DEVICE_ID = 0x53101,
BCM53115_DEVICE_ID = 0x53115,
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
BCM63XX_DEVICE_ID = 0x6300,
+ BCM6318_DEVICE_ID = 0x6318,
+ BCM6328_DEVICE_ID = 0x6328,
+ BCM6362_DEVICE_ID = 0x6362,
+ BCM6368_DEVICE_ID = 0x6368,
+ BCM63268_DEVICE_ID = 0x63268,
BCM53010_DEVICE_ID = 0x53010,
BCM53011_DEVICE_ID = 0x53011,
BCM53012_DEVICE_ID = 0x53012,
@@ -84,14 +98,29 @@ enum {
BCM583XX_DEVICE_ID = 0x58300,
BCM7445_DEVICE_ID = 0x7445,
BCM7278_DEVICE_ID = 0x7278,
+ BCM53134_DEVICE_ID = 0x5075,
+};
+
+enum b53_variant_id {
+ B53_VARIANT_NONE = 0,
+ B53_VARIANT_5325E,
+ B53_VARIANT_5325M,
+};
+
+struct b53_pcs {
+ struct phylink_pcs pcs;
+ struct b53_device *dev;
+ u8 lane;
};
#define B53_N_PORTS 9
#define B53_N_PORTS_25 6
+#define B53_N_PCS 2
struct b53_port {
u16 vlan_ctl_mask;
- struct ethtool_eee eee;
+ u16 pvid;
+ struct ethtool_keee eee;
};
struct b53_vlan {
@@ -109,9 +138,11 @@ struct b53_device {
struct mutex stats_mutex;
struct mutex arl_mutex;
const struct b53_io_ops *ops;
+ const struct b53_arl_ops *arl_ops;
/* chip specific data */
u32 chip_id;
+ enum b53_variant_id variant_id;
u8 core_rev;
u8 vta_regs[3];
u8 duplex_reg;
@@ -142,8 +173,11 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
+ bool vlan_filtering;
unsigned int num_ports;
struct b53_port *ports;
+
+ struct b53_pcs pcs[B53_N_PCS];
};
#define b53_for_each_port(dev, i) \
@@ -156,6 +190,18 @@ static inline int is5325(struct b53_device *dev)
return dev->chip_id == BCM5325_DEVICE_ID;
}
+static inline int is5325e(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325M;
+}
+
static inline int is5365(struct b53_device *dev)
{
#ifdef CONFIG_BCM47XX
@@ -182,12 +228,25 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
- dev->chip_id == BCM53128_DEVICE_ID;
+ dev->chip_id == BCM53101_DEVICE_ID ||
+ dev->chip_id == BCM53128_DEVICE_ID ||
+ dev->chip_id == BCM53134_DEVICE_ID;
}
static inline int is63xx(struct b53_device *dev)
{
- return dev->chip_id == BCM63XX_DEVICE_ID;
+ return dev->chip_id == BCM63XX_DEVICE_ID ||
+ dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM6328_DEVICE_ID ||
+ dev->chip_id == BCM6362_DEVICE_ID ||
+ dev->chip_id == BCM6368_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
+}
+
+static inline int is6318_268(struct b53_device *dev)
+{
+ return dev->chip_id == BCM6318_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
}
static inline int is5301x(struct b53_device *dev)
@@ -204,9 +263,11 @@ static inline int is58xx(struct b53_device *dev)
return dev->chip_id == BCM58XX_DEVICE_ID ||
dev->chip_id == BCM583XX_DEVICE_ID ||
dev->chip_id == BCM7445_DEVICE_ID ||
- dev->chip_id == BCM7278_DEVICE_ID;
+ dev->chip_id == BCM7278_DEVICE_ID ||
+ dev->chip_id == BCM53134_DEVICE_ID;
}
+#define B53_63XX_RGMII0 4
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
@@ -279,6 +340,33 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->vid = mac_vid >> ARLTBL_VID_S;
}
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 vid_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >>
+ ARLTBL_DATA_PORT_ID_S_25;
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+ ent->vid = vid_entry;
+}
+
+static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK_89;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID_89);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE_89);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC_89);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
@@ -293,6 +381,89 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
*fwd_entry |= ARLTBL_AGE;
}
+static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25)
+ *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25;
+ else
+ *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) &
+ ARLTBL_DATA_PORT_ID_MASK_25;
+ if (ent->is_valid)
+ *mac_vid |= ARLTBL_VALID_25;
+ if (ent->is_static)
+ *mac_vid |= ARLTBL_STATIC_25;
+ if (ent->is_age)
+ *mac_vid |= ARLTBL_AGE_25;
+ *vid_entry = ent->vid;
+}
+
+static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK_89;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID_89;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC_89;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE_89;
+}
+
+static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid, u8 ext)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >>
+ ARL_SRCH_RSLT_VID_S_25;
+ ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >>
+ ARL_SRCH_RSLT_PORT_ID_S_25;
+ if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII))
+ ent->port |= BIT(B53_CPU_PORT_25);
+ else if (!is_multicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT)
+ ent->port = B53_CPU_PORT_25;
+}
+
+static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent,
+ u64 mac_vid, u16 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+
+ ent->port = fwd_entry & ARL_SRST_PORT_ID_MASK_63XX;
+ ent->port >>= 1;
+
+ ent->is_age = !!(fwd_entry & ARL_SRST_AGE_63XX);
+ ent->is_static = !!(fwd_entry & ARL_SRST_STATIC_63XX);
+ ent->is_valid = 1;
+}
+
+static inline void b53_arl_read_entry(struct b53_device *dev,
+ struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_read_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_write_entry(struct b53_device *dev,
+ const struct b53_arl_entry *ent, u8 idx)
+{
+ dev->arl_ops->arl_write_entry(dev, ent, idx);
+}
+
+static inline void b53_arl_search_read(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ dev->arl_ops->arl_search_read(dev, idx, ent);
+}
+
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
@@ -324,8 +495,10 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_flags_pre(struct dsa_switch *ds, int port,
@@ -336,24 +509,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack);
int b53_vlan_add(struct dsa_switch *ds, int port,
@@ -362,27 +517,32 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_setup_port(struct dsa_switch *ds, int port);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
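
Note on the ARL changes above: the new b53_arl_ops table lets each bus
front-end pick chip-specific ARL accessors, while callers go through the
b53_arl_read_entry()/b53_arl_write_entry()/b53_arl_search_read() wrappers.
A standalone round-trip sketch of the 5325 packing, using only the helpers
and masks from this hunk (values illustrative):

	struct b53_arl_entry ent = {
		.mac	  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.port	  = 3,
		.vid	  = 1,
		.is_valid = 1,
	};
	u64 mac_vid;
	u8 vid_entry;

	b53_arl_from_entry_25(&mac_vid, &vid_entry, &ent);
	/* mac_vid: MAC in bits 47:0, port in bits 53:48, age/static/valid
	 * in bits 61/62/63; the VID travels separately in the 8-bit
	 * vid_entry.
	 */
	b53_arl_to_entry_25(&ent, mac_vid, vid_entry);	/* recovers ent */
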
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index b2c539a42154..54a278db67c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
#define B53_ARLIO_PAGE 0x05 /* ARL Access */
#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+#define B53_IEEE_PAGE 0x0a /* IEEE 802.1X */
/* PHY Registers */
#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
@@ -50,6 +51,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -92,18 +96,22 @@
#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */
#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
#define PORT_OVERRIDE_RX_FLOW BIT(4)
#define PORT_OVERRIDE_TX_FLOW BIT(5)
#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
#define B53_PD_MODE_CTRL_25 0x0f
+#define PD_MODE_PORT_MASK 0x1f
+/* Bit 0 also powers down the switch. */
+#define PD_MODE_POWER_DOWN_PORT(i) BIT(i)
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
-#define B53_IPMC_FWD_EN BIT(1)
+#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
@@ -111,6 +119,10 @@
#define B53_SWITCH_CTRL 0x22
#define B53_MII_DUMB_FWDG_EN BIT(6)
+/* Protected Port Selection (16 bit) */
+#define B53_PROTECTED_PORT_SEL 0x24
+#define B53_PROTECTED_PORT_SEL_25 0x26
+
/* (16 bit) */
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
@@ -138,6 +150,7 @@
#define B53_RGMII_CTRL_IMP 0x60
#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_MII_OVERRIDE BIT(6)
#define RGMII_CTRL_TIMING_SEL BIT(2)
#define RGMII_CTRL_DLL_RXC BIT(1)
#define RGMII_CTRL_DLL_TXC BIT(0)
@@ -216,6 +229,13 @@
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+/* Aging Time control register (32 bit) */
+#define B53_AGING_TIME_CONTROL 0x06
+#define B53_AGING_TIME_CONTROL_63XX 0x08
+#define AGE_CHANGE BIT(20)
+#define AGE_TIME_MASK 0x7ffff
+#define AGE_TIME_MAX 1048575
+
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
#define CAP_PORT_MASK 0xf
@@ -309,13 +329,12 @@
#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
-#define ARLTBL_VID_MASK_25 0xff
#define ARLTBL_VID_MASK 0xfff
#define ARLTBL_DATA_PORT_ID_S_25 48
-#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
-#define ARLTBL_AGE_25 BIT(61)
-#define ARLTBL_STATIC_25 BIT(62)
-#define ARLTBL_VALID_25 BIT(63)
+#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48)
+#define ARLTBL_AGE_25 BIT_ULL(61)
+#define ARLTBL_STATIC_25 BIT_ULL(62)
+#define ARLTBL_VALID_25 BIT_ULL(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
@@ -325,12 +344,23 @@
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
+/* BCM5389 ARL Table Data Entry N Register format (16 bit) */
+#define ARLTBL_DATA_PORT_ID_MASK_89 GENMASK(8, 0)
+#define ARLTBL_TC_MASK_89 GENMASK(12, 10)
+#define ARLTBL_AGE_89 BIT(13)
+#define ARLTBL_STATIC_89 BIT(14)
+#define ARLTBL_VALID_89 BIT(15)
+
+/* BCM5325/BCM5365 ARL Table VID Entry N Registers (8 bit) */
+#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30)
+
/* Maximum number of bin entries in the ARL for all switches */
#define B53_ARLTBL_MAX_BIN_ENTRIES 4
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20
+#define B53_ARL_SRCH_CTL_89 0x30
#define ARL_SRCH_VLID BIT(0)
#define ARL_SRCH_STDN BIT(7)
@@ -338,22 +368,54 @@
#define B53_ARL_SRCH_ADDR 0x51
#define B53_ARL_SRCH_ADDR_25 0x22
#define B53_ARL_SRCH_ADDR_65 0x24
+#define B53_ARL_SRCH_ADDR_89 0x31
+#define B53_ARL_SRCH_ADDR_63XX 0x32
#define ARL_ADDR_MASK GENMASK(14, 0)
/* ARL Search MAC/VID Result (64 bit) */
#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+#define B53_ARL_SRCH_RSLT_MACVID_89 0x33
+#define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34
-/* Single register search result on 5325 */
+/* Single register search result on 5325/5365 */
#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
-/* Single register search result on 5365 */
-#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+#define ARL_SRCH_RSLT_PORT_ID_S_25 48
+#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48)
+#define ARL_SRCH_RSLT_VID_S_25 53
+#define ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53)
+
+/* BCM5325/5365 Search result extend register (8 bit) */
+#define B53_ARL_SRCH_RSLT_EXT_25 0x2c
+#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2)
/* ARL Search Data Result (32 bit) */
#define B53_ARL_SRCH_RSTL_0 0x68
+/* BCM5389 ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_89 0x3b
+
#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+/* 63XX ARL Search Data Result (16 bit) */
+#define B53_ARL_SRCH_RSLT_63XX 0x3c
+#define ARL_SRST_PORT_ID_MASK_63XX GENMASK(9, 1)
+#define ARL_SRST_TC_MASK_63XX GENMASK(13, 11)
+#define ARL_SRST_AGE_63XX BIT(14)
+#define ARL_SRST_STATIC_63XX BIT(15)
+
+/*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF 0x94
+#define B53_IEEE_MCAST_DROP_EN BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF 0x96
+#define B53_IEEE_UCAST_DROP_EN BIT(11)
+
/*************************************************************************
* Port VLAN Registers
*************************************************************************/
@@ -480,6 +542,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
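
The aging-time registers added above hold a 19-bit time field
(AGE_TIME_MASK) plus an AGE_CHANGE strobe, with 63xx parts using the
alternate 0x08 offset. The matching b53_set_ageing_time() body is not part
of this diff; a minimal sketch, assuming the field counts seconds and the
registers sit on the management page (B53_MGMT_PAGE):

int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct b53_device *dev = ds->priv;
	u32 atc = DIV_ROUND_CLOSEST(msecs, 1000);
	int reg = is63xx(dev) ? B53_AGING_TIME_CONTROL_63XX :
				B53_AGING_TIME_CONTROL;

	if (atc > AGE_TIME_MASK)
		return -ERANGE;

	/* Assumed: setting AGE_CHANGE latches the new value */
	b53_write32(dev, B53_MGMT_PAGE, reg, atc | AGE_CHANGE);

	return 0;
}
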
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 5ae3d9783b68..7460122f6abc 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Northstar Plus switch SerDes/SGMII PHY main logic
*
@@ -17,6 +17,11 @@
#include "b53_serdes.h"
#include "b53_regs.h"
+static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct b53_pcs, pcs);
+}
+
static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
u16 value)
{
@@ -60,51 +65,47 @@ static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
return b53_serdes_read_blk(dev, offset, block);
}
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state)
+static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
reg |= FIBER_MODE_1000X;
else
reg &= ~FIBER_MODE_1000X;
b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK, reg);
+
+ return 0;
}
-EXPORT_SYMBOL(b53_serdes_config);
-void b53_serdes_an_restart(struct b53_device *dev, int port)
+static void b53_serdes_an_restart(struct phylink_pcs *pcs)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
reg |= BMCR_ANRESTART;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
-EXPORT_SYMBOL(b53_serdes_an_restart);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 dig, bmsr;
- if (lane == B53_INVALID_LANE)
- return 1;
-
dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
SERDES_DIGITAL_BLK);
bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
@@ -133,10 +134,7 @@ int b53_serdes_link_state(struct b53_device *dev, int port,
state->pause |= MLO_PAUSE_RX;
if (dig & PAUSE_RESOLUTION_TX_SIDE)
state->pause |= MLO_PAUSE_TX;
-
- return 0;
}
-EXPORT_SYMBOL(b53_serdes_link_state);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up)
@@ -158,9 +156,14 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static const struct phylink_pcs_ops b53_pcs_ops = {
+ .pcs_get_state = b53_serdes_get_state,
+ .pcs_config = b53_serdes_config,
+ .pcs_an_restart = b53_serdes_an_restart,
+};
+
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
@@ -169,20 +172,47 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
- phylink_set(supported, 2500baseX_Full);
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
- phylink_set(supported, 1000baseX_Full);
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
-EXPORT_SYMBOL(b53_serdes_phylink_validate);
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
+
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
+ !dev->pcs[lane].dev)
+ return NULL;
+
+ if (!phy_interface_mode_is_8023z(interface) &&
+ interface != PHY_INTERFACE_MODE_SGMII)
+ return NULL;
+
+ return &dev->pcs[lane].pcs;
+}
+EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
int b53_serdes_init(struct b53_device *dev, int port)
{
u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_pcs *pcs;
u16 id0, msb, lsb;
if (lane == B53_INVALID_LANE)
@@ -205,6 +235,11 @@ int b53_serdes_init(struct b53_device *dev, int port)
(id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
(u32)msb << 16 | lsb);
+ pcs = &dev->pcs[lane];
+ pcs->dev = dev;
+ pcs->lane = lane;
+ pcs->pcs.ops = &b53_pcs_ops;
+
return 0;
}
EXPORT_SYMBOL(b53_serdes_init);
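
With the conversion above, phylink drives the SerDes through b53_pcs_ops
on the phylink_pcs embedded in struct b53_pcs, and the lane validity
checks move into b53_serdes_phylink_mac_select_pcs(). The consuming glue
in b53_common.c is not shown here; a hedged sketch of what it presumably
looks like (function name assumed):

static struct phylink_pcs *
b53_phylink_mac_select_pcs(struct phylink_config *config,
			   phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct b53_device *dev = dp->ds->priv;

	if (!dev->ops->phylink_mac_select_pcs)
		return NULL;

	return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}
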
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index 55d280fe38e4..3d367c4df4d9 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Northstar Plus switch SerDes/SGMII PHY definitions
*
@@ -107,17 +107,13 @@ static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
return dev->ops->serdes_map_lane(dev, port);
}
-int b53_serdes_get_link(struct b53_device *dev, int port);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state);
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state);
-void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
int b53_serdes_init(struct b53_device *dev, int port);
#else
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 01e37b75471e..467da057579e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -16,7 +16,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -314,16 +314,12 @@ static int b53_spi_probe(struct spi_device *spi)
return 0;
}
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
if (dev)
b53_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void b53_spi_shutdown(struct spi_device *spi)
@@ -349,6 +345,19 @@ static const struct of_device_id b53_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, b53_spi_of_match);
+static const struct spi_device_id b53_spi_ids[] = {
+ { .name = "bcm5325" },
+ { .name = "bcm5365" },
+ { .name = "bcm5395" },
+ { .name = "bcm5397" },
+ { .name = "bcm5398" },
+ { .name = "bcm53115" },
+ { .name = "bcm53125" },
+ { .name = "bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
@@ -357,6 +366,7 @@ static struct spi_driver b53_spi_driver = {
.probe = b53_spi_probe,
.remove = b53_spi_remove,
.shutdown = b53_spi_shutdown,
+ .id_table = b53_spi_ids,
};
module_spi_driver(b53_spi_driver);
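
The new spi_device_id table lets the driver bind without devicetree, e.g.
from legacy board code. A hypothetical registration (bus parameters
assumed):

	static struct spi_board_info b53_board_info __initdata = {
		.modalias     = "bcm5325",	/* matches b53_spi_ids[] */
		.max_speed_hz = 2000000,
		.bus_num      = 0,
		.chip_select  = 1,
	};

	/* ...then, from early board init: */
	spi_register_board_info(&b53_board_info, 1);
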
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..b9939bbd2cd5 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
}
}
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+	 * are using a serdes. Ask the serdes for the capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+ /* Some other mode (e.g. MII, GMII etc) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -456,13 +489,11 @@ static const struct b53_io_ops b53_srab_ops = {
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
+ .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
.serdes_map_lane = b53_srab_serdes_map_lane,
- .serdes_link_state = b53_serdes_link_state,
- .serdes_config = b53_serdes_config,
- .serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
- .serdes_phylink_validate = b53_serdes_phylink_validate,
#endif
};
@@ -626,19 +657,15 @@ static int b53_srab_probe(struct platform_device *pdev)
return b53_switch_register(dev);
}
-static int b53_srab_remove(struct platform_device *pdev)
+static void b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
if (!dev)
- return 0;
+ return;
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void b53_srab_shutdown(struct platform_device *pdev)
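
In b53_srab_phylink_get_caps() above, phy_interface_set_rgmii() advertises
all four RGMII delay variants at once, since the TX/RX delay flavour is a
matter of board wiring rather than MAC capability. Per its definition in
linux/phy.h, the helper is equivalent to:

	__set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_RXID, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII_TXID, config->supported_interfaces);
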
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 13aa43b5cffd..960685596093 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -62,6 +62,56 @@ static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
+{
+ switch (port) {
+ case 0:
+ return REG_LED_0_CNTRL;
+ case 1:
+ return REG_LED_1_CNTRL;
+ case 2:
+ return REG_LED_2_CNTRL;
+ }
+
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 3:
+ return REG_LED_3_CNTRL;
+ case 7:
+ return REG_LED_4_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -109,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -135,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -187,9 +222,14 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
/* Use PHY-driven LED signaling */
if (!enable) {
- reg = reg_readl(priv, REG_LED_CNTRL(0));
- reg |= SPDLNK_SRC_SEL;
- reg_writel(priv, reg, REG_LED_CNTRL(0));
+ u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
+
+ if (priv->type == BCM7278_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID) {
+ reg = reg_led_readl(priv, led_ctrl, 0);
+ reg |= LED_CNTRL_SPDLNK_SRC_SEL;
+ reg_led_writel(priv, reg, led_ctrl, 0);
+ }
}
}
@@ -473,12 +513,12 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
u32 reg;
int i;
- mask = BIT(priv->num_crossbar_int_ports) - 1;
+ mask = BIT(priv->num_crossbar_ext_bits) - 1;
reg = reg_readl(priv, REG_CROSSBAR);
switch (priv->type) {
case BCM4908_DEVICE_ID:
- shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_int_ports;
+ shift = CROSSBAR_BCM4908_INT_P7 * priv->num_crossbar_ext_bits;
reg &= ~(mask << shift);
if (0) /* FIXME */
reg |= CROSSBAR_BCM4908_EXT_SERDES << shift;
@@ -496,7 +536,7 @@ static void bcm_sf2_crossbar_setup(struct bcm_sf2_priv *priv)
reg = reg_readl(priv, REG_CROSSBAR);
for (i = 0; i < priv->num_crossbar_int_ports; i++) {
- shift = i * priv->num_crossbar_int_ports;
+ shift = i * priv->num_crossbar_ext_bits;
dev_dbg(dev, "crossbar int port #%d - ext port #%d\n", i,
(reg >> shift) & mask);
@@ -577,26 +617,22 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
if (!priv->master_mii_bus) {
- of_node_put(dn);
- return -EPROBE_DEFER;
+ err = -EPROBE_DEFER;
+ goto err_of_node_put;
}
- get_device(&priv->master_mii_bus->dev);
- priv->master_mii_dn = dn;
-
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus) {
- of_node_put(dn);
- return -ENOMEM;
+ priv->user_mii_bus = mdiobus_alloc();
+ if (!priv->user_mii_bus) {
+ err = -ENOMEM;
+ goto err_put_master_mii_bus_dev;
}
- priv->slave_mii_bus->priv = priv;
- priv->slave_mii_bus->name = "sf2 slave mii";
- priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
- priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
- snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ priv->user_mii_bus->priv = priv;
+ priv->user_mii_bus->name = "sf2 user mii";
+ priv->user_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->user_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
index++);
- priv->slave_mii_bus->dev.of_node = dn;
/* Include the pseudo-PHY address to divert reads towards our
* workaround. This is only required for 7445D0, since 7445E0
@@ -614,9 +650,9 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->indir_phy_mask = 0;
ds->phys_mii_mask = priv->indir_phy_mask;
- ds->slave_mii_bus = priv->slave_mii_bus;
- priv->slave_mii_bus->parent = ds->dev->parent;
- priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+ ds->user_mii_bus = priv->user_mii_bus;
+ priv->user_mii_bus->parent = ds->dev->parent;
+ priv->user_mii_bus->phy_mask = ~priv->indir_phy_mask;
/* We need to make sure that of_phy_connect() will not work by
* removing the 'phandle' and 'linux,phandle' properties and
@@ -639,21 +675,34 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
of_remove_property(child, prop);
phydev = of_phy_find_device(child);
- if (phydev)
+ if (phydev) {
phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
}
- err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
- of_node_put(dn);
+ err = mdiobus_register(priv->user_mii_bus);
+ if (err)
+ goto err_free_user_mii_bus;
+ of_node_put(dn);
+
+ return 0;
+
+err_free_user_mii_bus:
+ mdiobus_free(priv->user_mii_bus);
+err_put_master_mii_bus_dev:
+ put_device(&priv->master_mii_bus->dev);
+err_of_node_put:
+ of_node_put(dn);
return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
- mdiobus_unregister(priv->slave_mii_bus);
- of_node_put(priv->master_mii_dn);
+ mdiobus_unregister(priv->user_mii_bus);
+ mdiobus_free(priv->user_mii_bus);
+ put_device(&priv->master_mii_bus->dev);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
@@ -672,61 +721,40 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
PHY_BRCM_IDDQ_SUSPEND;
}
-static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
+ unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_MOCA) {
- linkmode_zero(supported);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID))
- dev_err(ds->dev,
- "Unsupported interface: %d for port %d\n",
- state->interface, port);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
-static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
u32 id_mode_dis = 0, port_mode;
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl;
u32 reg;
- if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ priv = bcm_sf2_to_priv(dp->ds);
+
+ if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID))
return;
switch (state->interface) {
@@ -747,7 +775,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
return;
}
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index);
/* Clear id_mode_dis bit, and the existing port mode, let
 * RGMII_MODE_EN be set by mac_link_{up,down}
@@ -786,84 +814,93 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
reg_writel(priv, reg, reg_rgmii_ctrl);
}
-static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
+ int port = dp->index;
u32 reg, offset;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ priv = bcm_sf2_to_priv(dp->ds);
+ if (priv->wol_ports_mask & BIT(port))
+ return;
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
- bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false);
}
-static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
+ u32 reg_rgmii_ctrl = 0;
+ struct ethtool_keee *p;
+ int port = dp->index;
+ u32 reg, offset;
- bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ priv = bcm_sf2_to_priv(dp->ds);
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
+ if (tx_pause)
+ reg |= TX_PAUSE_EN;
+ if (rx_pause)
+ reg |= RX_PAUSE_EN;
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
- core_writel(priv, reg, offset);
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
}
- if (mode == MLO_AN_PHY && phydev)
- p->eee_enabled = b53_eee_init(ds, port, phydev);
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
+ if (mode == MLO_AN_PHY && phydev) {
+ p = &priv->dev->ports[port].eee;
+ p->eee_enabled = b53_eee_init(dp->ds, port, phydev);
+ }
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
@@ -889,7 +926,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(dsa_to_port(ds, port)->slave);
+ netif_carrier_off(dsa_to_port(ds, port)->user);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
@@ -963,7 +1000,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -987,7 +1024,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1146,8 +1183,8 @@ static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port,
int cnt = b53_get_sset_count(ds, port, stringset);
b53_get_strings(ds, port, stringset, data);
- bcm_sf2_cfp_get_strings(ds, port, stringset,
- data + cnt * ETH_GSTRING_LEN);
+ data += cnt * ETH_GSTRING_LEN;
+ bcm_sf2_cfp_get_strings(ds, port, stringset, &data);
}
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1172,6 +1209,12 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
return cnt;
}
+static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
+ .mac_config = bcm_sf2_sw_mac_config,
+ .mac_link_down = bcm_sf2_sw_mac_link_down,
+ .mac_link_up = bcm_sf2_sw_mac_link_up,
+};
+
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
@@ -1181,19 +1224,18 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phylink_validate = bcm_sf2_sw_validate,
- .phylink_mac_config = bcm_sf2_sw_mac_config,
- .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
- .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
+ .port_setup = b53_setup_port,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
@@ -1220,6 +1262,7 @@ struct bcm_sf2_of_data {
unsigned int core_reg_align;
unsigned int num_cfp_rules;
unsigned int num_crossbar_int_ports;
+ unsigned int num_crossbar_ext_bits;
};
static const u16 bcm_sf2_4908_reg_offsets[] = {
@@ -1232,9 +1275,14 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
[REG_SPHY_CNTRL] = 0x24,
[REG_CROSSBAR] = 0xc8,
[REG_RGMII_11_CNTRL] = 0x014c,
- [REG_LED_0_CNTRL] = 0x40,
- [REG_LED_1_CNTRL] = 0x4c,
- [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_3_CNTRL] = 0x64,
+ [REG_LED_4_CNTRL] = 0x88,
+ [REG_LED_5_CNTRL] = 0xa0,
+ [REG_LED_AGGREGATE_CTRL] = 0xb8,
+
};
static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
@@ -1243,6 +1291,7 @@ static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
.reg_offsets = bcm_sf2_4908_reg_offsets,
.num_cfp_rules = 256,
.num_crossbar_int_ports = 2,
+ .num_crossbar_ext_bits = 2,
};
/* Register offsets for the SWITCH_REG_* block */
@@ -1354,6 +1403,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->core_reg_align = data->core_reg_align;
priv->num_cfp_rules = data->num_cfp_rules;
priv->num_crossbar_int_ports = data->num_crossbar_int_ports;
+ priv->num_crossbar_ext_bits = data->num_crossbar_ext_bits;
priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev,
"switch");
@@ -1370,6 +1420,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->dev = dev;
ds = dev->ds;
ds->ops = &bcm_sf2_ops;
+ ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
@@ -1411,7 +1462,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
if (IS_ERR(priv->clk_mdiv)) {
@@ -1419,7 +1472,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
goto out_clk;
}
- clk_prepare_enable(priv->clk_mdiv);
+ ret = clk_prepare_enable(priv->clk_mdiv);
+ if (ret)
+ goto out_clk;
ret = bcm_sf2_sw_rst(priv);
if (ret) {
@@ -1508,12 +1563,12 @@ out_clk:
return ret;
}
-static int bcm_sf2_sw_remove(struct platform_device *pdev)
+static void bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
if (!priv)
- return 0;
+ return;
priv->wol_ports_mask = 0;
/* Disable interrupts */
@@ -1525,10 +1580,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
@@ -1574,7 +1625,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
- .remove = bcm_sf2_sw_remove,
+ .remove = bcm_sf2_sw_remove,
.shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0d48402068d3..be9f3b29019f 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -75,6 +75,7 @@ struct bcm_sf2_priv {
unsigned int core_reg_align;
unsigned int num_cfp_rules;
unsigned int num_crossbar_int_ports;
+ unsigned int num_crossbar_ext_bits;
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
@@ -107,8 +108,7 @@ struct bcm_sf2_priv {
/* Master and slave MDIO bus controller */
unsigned int indir_phy_mask;
- struct device_node *master_mii_dn;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
struct mii_bus *master_mii_bus;
/* Bitmask of ports needing BRCM tags */
@@ -210,6 +210,16 @@ SF2_IO_MACRO(acb);
SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
+static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
+{
+ return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
+}
+
+static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
+{
+ writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
+}
+
/* RXNFC */
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
@@ -218,8 +228,8 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
void bcm_sf2_cfp_exit(struct dsa_switch *ds);
int bcm_sf2_cfp_resume(struct dsa_switch *ds);
-void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *data);
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t **data);
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data);
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index a7e2fcf2df2c..e22362e6f0cd 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
int port, u32 location)
{
- struct cfp_rule *rule = NULL;
+ struct cfp_rule *rule;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
if (rule->port == port && rule->fs.location == location)
- break;
+ return rule;
}
- return rule;
+ return NULL;
}
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1279,27 +1279,19 @@ static const struct bcm_sf2_cfp_stat {
},
};
-void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *data)
+void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t **data)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
- char buf[ETH_GSTRING_LEN];
- unsigned int i, j, iter;
+ unsigned int i, j;
if (stringset != ETH_SS_STATS)
return;
- for (i = 1; i < priv->num_cfp_rules; i++) {
- for (j = 0; j < s; j++) {
- snprintf(buf, sizeof(buf),
- "CFP%03d_%sCntr",
- i, bcm_sf2_cfp_stats[j].name);
- iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
- buf, ETH_GSTRING_LEN);
- }
- }
+ for (i = 1; i < priv->num_cfp_rules; i++)
+ for (j = 0; j < ARRAY_SIZE(bcm_sf2_cfp_stats); j++)
+ ethtool_sprintf(data, "CFP%03d_%sCntr", i,
+ bcm_sf2_cfp_stats[j].name);
}
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
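
The get_strings conversion above relies on ethtool_sprintf() formatting
into the string table and advancing the caller's cursor by ETH_GSTRING_LEN
per call, which is why the helpers now take uint8_t **. A toy sketch of
the contract ('data' assumed to point at the ethtool string buffer):

	u8 *pos = data;

	ethtool_sprintf(&pos, "CFP%03d_%sCntr", 1, "Rx");
	ethtool_sprintf(&pos, "CFP%03d_%sCntr", 1, "RxBytes");
	/* pos == data + 2 * ETH_GSTRING_LEN */
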
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 7bffc80f241f..da0dedbd6555 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -25,6 +25,10 @@ enum bcm_sf2_reg_offs {
REG_LED_0_CNTRL,
REG_LED_1_CNTRL,
REG_LED_2_CNTRL,
+ REG_LED_3_CNTRL,
+ REG_LED_4_CNTRL,
+ REG_LED_5_CNTRL,
+ REG_LED_AGGREGATE_CTRL,
REG_SWITCH_REG_MAX,
};
@@ -56,6 +60,63 @@ enum bcm_sf2_reg_offs {
#define CROSSBAR_BCM4908_EXT_GPHY4 1
#define CROSSBAR_BCM4908_EXT_RGMII 2
+/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */
+#define LED_CNTRL_NO_LINK_ENCODE_SHIFT 0
+#define LED_CNTRL_M10_ENCODE_SHIFT 2
+#define LED_CNTRL_M100_ENCODE_SHIFT 4
+#define LED_CNTRL_M1000_ENCODE_SHIFT 6
+#define LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT 8
+#define LED_CNTRL_SEL_10M_ENCODE_SHIFT 10
+#define LED_CNTRL_SEL_100M_ENCODE_SHIFT 12
+#define LED_CNTRL_SEL_1000M_ENCODE_SHIFT 14
+#define LED_CNTRL_RX_DV_EN (1 << 16)
+#define LED_CNTRL_TX_EN_EN (1 << 17)
+#define LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT 18
+#define LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT 20
+#define LED_CNTRL_ACT_LED_ACT_SEL_SHIFT 22
+#define LED_CNTRL_SPDLNK_SRC_SEL (1 << 24)
+#define LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL (1 << 25)
+#define LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL (1 << 26)
+#define LED_CNTRL_ACT_LED_POL_SEL (1 << 27)
+#define LED_CNTRL_MASK 0x3
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_CTRL 0x0
+#define LED_CTRL_RX_ACT_EN 0x00000001
+#define LED_CTRL_TX_ACT_EN 0x00000002
+#define LED_CTRL_SPDLNK_LED0_ACT_SEL 0x00000004
+#define LED_CTRL_SPDLNK_LED1_ACT_SEL 0x00000008
+#define LED_CTRL_SPDLNK_LED2_ACT_SEL 0x00000010
+#define LED_CTRL_ACT_LED_ACT_SEL 0x00000020
+#define LED_CTRL_SPDLNK_LED0_ACT_POL_SEL 0x00000040
+#define LED_CTRL_SPDLNK_LED1_ACT_POL_SEL 0x00000080
+#define LED_CTRL_SPDLNK_LED2_ACT_POL_SEL 0x00000100
+#define LED_CTRL_ACT_LED_POL_SEL 0x00000200
+#define LED_CTRL_LED_SPD_OVRD 0x00001c00
+#define LED_CTRL_LNK_STATUS_OVRD 0x00002000
+#define LED_CTRL_SPD_OVRD_EN 0x00004000
+#define LED_CTRL_LNK_OVRD_EN 0x00008000
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC_SEL 0x4
+#define LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_SEL_10M_SHIFT 3
+#define LED_LINK_SPEED_ENC_SEL_100M_SHIFT 6
+#define LED_LINK_SPEED_ENC_SEL_1000M_SHIFT 9
+#define LED_LINK_SPEED_ENC_SEL_2500M_SHIFT 12
+#define LED_LINK_SPEED_ENC_SEL_10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_SEL_MASK 0x7
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC 0x8
+#define LED_LINK_SPEED_ENC_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_M10_SHIFT 3
+#define LED_LINK_SPEED_ENC_M100_SHIFT 6
+#define LED_LINK_SPEED_ENC_M1000_SHIFT 9
+#define LED_LINK_SPEED_ENC_M2500_SHIFT 12
+#define LED_LINK_SPEED_ENC_M10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_MASK 0x7
+
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
#define ID_MODE_DIS (1 << 1)
@@ -73,10 +134,6 @@ enum bcm_sf2_reg_offs {
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
-#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x))
-
-#define SPDLNK_SRC_SEL (1 << 24)
-
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
#define INTRL2_CPU_STATUS 0x00
#define INTRL2_CPU_SET 0x04
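
The BCM7278/7445 LED encode fields added above are two bits wide
(LED_CNTRL_MASK) at the *_ENCODE_SHIFT offsets, so reprogramming one is a
read-modify-write through the reg_led_readl()/reg_led_writel() accessors
from bcm_sf2.h. A hedged sketch (the encoding value is assumed):

	u16 led = bcm_sf2_reg_led_base(priv, 0);
	u32 reg = reg_led_readl(priv, led, 0);

	reg &= ~(LED_CNTRL_MASK << LED_CNTRL_M1000_ENCODE_SHIFT);
	reg |= 0x2 << LED_CNTRL_M1000_ENCODE_SHIFT;	/* hypothetical */
	reg_led_writel(priv, reg, led, 0);
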
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e638e3eea911..4a416f2717ba 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,19 @@
#include <linux/dsa/loop.h>
#include <net/dsa.h>
-#include "dsa_loop.h"
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
@@ -27,6 +39,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
+static struct mdio_device *switch_mdiodev;
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
@@ -121,8 +134,7 @@ static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
return;
for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ps->ports[port].mib[i].name);
}
static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -167,19 +179,21 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
return 0;
}
static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
}
static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -275,6 +289,14 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
return ETH_MAX_MTU;
}
+static void dsa_loop_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ bitmap_fill(config->supported_interfaces, PHY_INTERFACE_MODE_MAX);
+ __clear_bit(PHY_INTERFACE_MODE_NA, config->supported_interfaces);
+ config->mac_capabilities = ~0;
+}
+
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
@@ -293,6 +315,7 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.port_vlan_del = dsa_loop_port_vlan_del,
.port_change_mtu = dsa_loop_port_change_mtu,
.port_max_mtu = dsa_loop_port_max_mtu,
+ .phylink_get_caps = dsa_loop_phylink_get_caps,
};
static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
@@ -349,8 +372,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
@@ -374,36 +395,82 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
-#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+static void dsa_loop_phydevs_unregister(void)
+{
+ for (int i = 0; i < NUM_FIXED_PHYS; i++) {
+ if (!IS_ERR(phydevs[i]))
+ fixed_phy_unregister(phydevs[i]);
+ }
+}
-static int __init dsa_loop_init(void)
+static int __init dsa_loop_create_switch_mdiodev(void)
{
- struct fixed_phy_status status = {
- .link = 1,
- .speed = SPEED_100,
- .duplex = DUPLEX_FULL,
+ static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
};
+ struct mii_bus *bus;
+ int ret = -ENODEV;
+
+ bus = mdio_find_bus("fixed-0");
+ if (WARN_ON(!bus))
+ return ret;
+
+ switch_mdiodev = mdio_device_create(bus, 31);
+ if (IS_ERR(switch_mdiodev))
+ goto out;
+
+ strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
+
+ ret = mdio_device_register(switch_mdiodev);
+ if (ret)
+ mdio_device_free(switch_mdiodev);
+out:
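+ /* Note (illustrative): mdio_find_bus() takes a reference on the bus
+ * device, which must be dropped on every exit path.
+ */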
+ put_device(&bus->dev);
+ return ret;
+}
+
+static int __init dsa_loop_init(void)
+{
unsigned int i;
+ int ret;
+
+ ret = dsa_loop_create_switch_mdiodev();
+ if (ret)
+ return ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ phydevs[i] = fixed_phy_register_100fd();
- return mdio_driver_register(&dsa_loop_drv);
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret) {
+ dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
+ }
+
+ return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
- unsigned int i;
-
mdio_driver_unregister(&dsa_loop_drv);
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i]))
- fixed_phy_unregister(phydevs[i]);
+ dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
}
module_exit(dsa_loop_exit);
-MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
deleted file mode 100644
index 93e5c15d0efd..000000000000
--- a/drivers/net/dsa/dsa_loop.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DSA_LOOP_H
-#define __DSA_LOOP_H
-
-struct dsa_chip_data;
-
-struct dsa_loop_pdata {
- /* Must be first, such that dsa_register_switch() can access this
- * without gory pointer manipulations
- */
- struct dsa_chip_data cd;
- const char *name;
- unsigned int enabled_ports;
- const char *netdev;
-};
-
-#define DSA_LOOP_NUM_PORTS 6
-#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
-
-#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
deleted file mode 100644
index 237066d30704..000000000000
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#include "dsa_loop.h"
-
-static struct dsa_loop_pdata dsa_loop_pdata = {
- .cd = {
- .port_names[0] = "lan1",
- .port_names[1] = "lan2",
- .port_names[2] = "lan3",
- .port_names[3] = "lan4",
- .port_names[DSA_LOOP_CPU_PORT] = "cpu",
- },
- .name = "DSA mockup driver",
- .enabled_ports = 0x1f,
- .netdev = "eth0",
-};
-
-static const struct mdio_board_info bdinfo = {
- .bus_id = "fixed-0",
- .modalias = "dsa-loop",
- .mdio_addr = 31,
- .platform_data = &dsa_loop_pdata,
-};
-
-static int __init dsa_loop_bdinfo_init(void)
-{
- return mdiobus_register_board_info(&bdinfo, 1);
-}
-arch_initcall(dsa_loop_bdinfo_init)
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 4e0b53d94b52..dd5f263ab984 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 or MIT)
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
@@ -128,6 +127,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -285,12 +294,8 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
{
int i;
- for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
- const struct hellcreek_counter *counter = &hellcreek_counter[i];
-
- strlcpy(data + i * ETH_GSTRING_LEN,
- counter->name, ETH_GSTRING_LEN);
- }
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i)
+ ethtool_puts(&data, hellcreek_counter[i].name);
}
static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -674,7 +679,9 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
}
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
@@ -691,7 +698,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
}
static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct hellcreek *hellcreek = ds->priv;
@@ -710,8 +717,9 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
u16 meta = 0;
dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
- "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
- entry->is_obt, entry->reprio_en, entry->reprio_tc);
+ "OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac,
+ entry->portmask, entry->is_obt, entry->pass_blocked,
+ entry->reprio_en, entry->reprio_tc);
/* Add mac address */
hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
@@ -722,6 +730,8 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
if (entry->is_obt)
meta |= HR_FDBWRM0_OBT;
+ if (entry->pass_blocked)
+ meta |= HR_FDBWRM0_PASS_BLOCKED;
if (entry->reprio_en) {
meta |= HR_FDBWRM0_REPRIO_EN;
meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
@@ -823,7 +833,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
}
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -868,7 +879,8 @@ out:
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -1049,7 +1061,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry ptp = {
+ static const struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1060,24 +1072,94 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry p2p = {
+ static const struct hellcreek_fdb_entry udp4_ptp = {
+ /* MAC: 01-00-5E-00-01-81 */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static const struct hellcreek_fdb_entry udp6_ptp = {
+ /* MAC: 33-33-00-00-01-81 */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static const struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
- .pass_blocked = 0,
+ .pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
+ static const struct hellcreek_fdb_entry udp4_p2p = {
+ /* MAC: 01-00-5E-00-00-6B */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static const struct hellcreek_fdb_entry udp6_p2p = {
+ /* MAC: 33-33-00-00-00-6B */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static const struct hellcreek_fdb_entry stp = {
+ /* MAC: 01-80-C2-00-00-00 */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
int ret;
mutex_lock(&hellcreek->reg_lock);
- ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ ret = __hellcreek_fdb_add(hellcreek, &l2_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &l2_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p);
if (ret)
goto out;
- ret = __hellcreek_fdb_add(hellcreek, &p2p);
+ ret = __hellcreek_fdb_add(hellcreek, &stp);
out:
mutex_unlock(&hellcreek->reg_lock);
@@ -1089,11 +1171,6 @@ static int hellcreek_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
- int ret;
-
- ret = devlink_info_driver_name_put(req, "hellcreek");
- if (ret)
- return ret;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
@@ -1243,13 +1320,13 @@ static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl,
return 0;
}
-static struct devlink_region_ops hellcreek_region_vlan_ops = {
+static const struct devlink_region_ops hellcreek_region_vlan_ops = {
.name = "vlan",
.snapshot = hellcreek_devlink_region_vlan_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops hellcreek_region_fdb_ops = {
+static const struct devlink_region_ops hellcreek_region_fdb_ops = {
.name = "fdb",
.snapshot = hellcreek_devlink_region_fdb_snapshot,
.destructor = kfree,
@@ -1258,7 +1335,7 @@ static struct devlink_region_ops hellcreek_region_fdb_ops = {
static int hellcreek_setup_devlink_regions(struct dsa_switch *ds)
{
struct hellcreek *hellcreek = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int ret;
@@ -1384,14 +1461,19 @@ static void hellcreek_teardown(struct dsa_switch *ds)
dsa_devlink_resources_unregister(ds);
}
-static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct hellcreek *hellcreek = ds->priv;
- dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
+
+ /* Include GMII - the hardware does not support this interface
+ * mode, but it's the default interface mode for phylib, so we
+ * need it for compatibility with existing DT.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
/* The MAC settings are a hardware configuration option and cannot be
* changed at run time or by strapping. Therefore the attached PHYs
@@ -1399,12 +1481,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
* by the hardware.
*/
if (hellcreek->pdata->is_100_mbits)
- phylink_set(mask, 100baseT_Full);
+ config->mac_capabilities = MAC_100FD;
else
- phylink_set(mask, 1000baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_1000FD;
}
static int
@@ -1458,6 +1537,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
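+/* Worked example (illustrative): with a tc-taprio max-sdu of 1500 bytes for
+ * a traffic class, the value programmed above is 1500 + VLAN_ETH_HLEN (18)
+ * - ETH_FCS_LEN (4) = 1514, masked to HR_PTPRTCCFG_MAXSDU_MASK.
+ */
+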
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1641,7 +1759,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1693,7 +1814,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1730,22 +1854,47 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
-static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
- enum tc_setup_type type, void *type_data)
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
- struct hellcreek *hellcreek = ds->priv;
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
- if (type != TC_SETUP_QDISC_TAPRIO)
- return -EOPNOTSUPP;
+ caps->supports_queue_max_sdu = true;
- if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return 0;
+ }
+ default:
return -EOPNOTSUPP;
+ }
+}
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
+static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ struct hellcreek *hellcreek = ds->priv;
- return hellcreek_port_del_schedule(ds, port);
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
+
+ return hellcreek_port_set_schedule(ds, port, taprio);
+ case TAPRIO_CMD_DESTROY:
+ return hellcreek_port_del_schedule(ds, port);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1755,7 +1904,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.get_strings = hellcreek_get_strings,
.get_tag_protocol = hellcreek_get_tag_protocol,
.get_ts_info = hellcreek_get_ts_info,
- .phylink_validate = hellcreek_phylink_validate,
+ .phylink_get_caps = hellcreek_phylink_get_caps,
.port_bridge_flags = hellcreek_bridge_flags,
.port_bridge_join = hellcreek_port_bridge_join,
.port_bridge_leave = hellcreek_port_bridge_leave,
@@ -1777,6 +1926,8 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.port_vlan_filtering = hellcreek_vlan_filtering,
.setup = hellcreek_setup,
.teardown = hellcreek_teardown,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int hellcreek_probe(struct platform_device *pdev)
@@ -1815,11 +1966,8 @@ static int hellcreek_probe(struct platform_device *pdev)
if (!port->counter_values)
return -ENOMEM;
- port->vlan_dev_bitmap =
- devm_kcalloc(dev,
- BITS_TO_LONGS(VLAN_N_VID),
- sizeof(unsigned long),
- GFP_KERNEL);
+ port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!port->vlan_dev_bitmap)
return -ENOMEM;
@@ -1910,19 +2058,16 @@ err_ptp_setup:
return ret;
}
-static int hellcreek_remove(struct platform_device *pdev)
+static void hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
if (!hellcreek)
- return 0;
+ return;
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void hellcreek_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..bebf0d3ff330 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* DSA driver for:
* Hirschmann Hellcreek TSN switch.
@@ -12,14 +12,16 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/mutex.h>
#include <linux/platform_data/hirschmann-hellcreek.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <net/dsa.h>
#include <net/pkt_sched.h>
@@ -37,6 +39,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +75,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
@@ -235,7 +244,7 @@ struct hellcreek_port_hwtstamp {
struct sk_buff *tx_skb;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
struct hellcreek_port {
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index 40b41c794dfa..99941ff1ebf9 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -16,7 +16,7 @@
#include "hellcreek_ptp.h"
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hellcreek *hellcreek = ds->priv;
@@ -40,7 +40,7 @@ int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
@@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
- /* Reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_ON:
tx_tstamp_enable = true;
@@ -114,41 +110,35 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
- config = &ps->tstamp_config;
+ *config = ps->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
@@ -302,17 +292,10 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
struct sk_buff_head received;
unsigned long flags;
- /* The latched timestamp belongs to one of the received frames. */
+ /* Construct Rx timestamps for all received PTP packets. */
__skb_queue_head_init(&received);
-
- /* Lock & disable interrupts */
spin_lock_irqsave(&rxq->lock, flags);
-
- /* Add the reception queue "rxq" to the "received" queue an reintialize
- * "rxq". From now on, we deal with "received" not with "rxq"
- */
skb_queue_splice_tail_init(rxq, &received);
-
spin_unlock_irqrestore(&rxq->lock, flags);
for (; skb; skb = __skb_dequeue(&received)) {
@@ -335,7 +318,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 71af77efb28b..388821c4aa10 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -38,9 +38,10 @@
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -48,7 +49,7 @@ void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 2572c6087bb5..cb23bea9c21b 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -9,6 +9,7 @@
* Kurt Kanzenbach <kurt@linutronix.de>
*/
+#include <linux/of.h>
#include <linux/ptp_clock_kernel.h>
#include "hellcreek.h"
#include "hellcreek_ptp.h"
@@ -26,7 +27,8 @@ void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
}
/* Get nanoseconds from PTP clock */
-static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u16 nsl, nsh;
@@ -44,16 +46,19 @@ static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_prets(sts);
nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_postts(sts);
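+ /* Note (illustrative): the pre/post system timestamps bracket the
+ * final latching read, letting gettimex64 users correlate PHC time
+ * with system time.
+ */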
return (u64)nsl | ((u64)nsh << 16);
}
-static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
- ns = hellcreek_ptp_clock_read(hellcreek);
+ ns = hellcreek_ptp_clock_read(hellcreek, sts);
if (ns < hellcreek->last_ts)
hellcreek->seconds++;
hellcreek->last_ts = ns;
@@ -71,7 +76,7 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
{
u64 s;
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
if (hellcreek->last_ts > ns)
s = hellcreek->seconds * NSEC_PER_SEC;
else
@@ -80,14 +85,15 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
return s;
}
-static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int hellcreek_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u64 ns;
mutex_lock(&hellcreek->ptp_lock);
- ns = __hellcreek_ptp_gettime(hellcreek);
+ ns = __hellcreek_ptp_gettime(hellcreek, sts);
mutex_unlock(&hellcreek->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -183,7 +189,7 @@ static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
struct timespec64 now, then = ns_to_timespec64(delta);
- hellcreek_ptp_gettime(ptp, &now);
+ hellcreek_ptp_gettimex(ptp, &now, NULL);
now = timespec64_add(now, then);
hellcreek_ptp_settime(ptp, &now);
@@ -232,7 +238,7 @@ static void hellcreek_ptp_overflow_check(struct work_struct *work)
hellcreek = dw_overflow_to_hellcreek(dw);
mutex_lock(&hellcreek->ptp_lock);
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
mutex_unlock(&hellcreek->ptp_lock);
schedule_delayed_work(&hellcreek->overflow_work,
@@ -297,9 +303,11 @@ static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
static int hellcreek_led_setup(struct hellcreek *hellcreek)
{
struct device_node *leds, *led = NULL;
- const char *label, *state;
+ enum led_default_state state;
+ const char *label;
int ret = -EINVAL;
+ of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
@@ -317,16 +325,17 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
ret = of_property_read_string(led, "label", &label);
hellcreek->led_sync_good.name = ret ? "sync_good" : label;
- ret = of_property_read_string(led, "default-state", &state);
- if (!ret) {
- if (!strcmp(state, "on"))
- hellcreek->led_sync_good.brightness = 1;
- else if (!strcmp(state, "off"))
- hellcreek->led_sync_good.brightness = 0;
- else if (!strcmp(state, "keep"))
- hellcreek->led_sync_good.brightness =
- hellcreek_get_brightness(hellcreek,
- STATUS_OUT_SYNC_GOOD);
+ state = led_init_default_state_get(of_fwnode_handle(led));
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ hellcreek->led_sync_good.brightness = 1;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+ break;
+ default:
+ hellcreek->led_sync_good.brightness = 0;
}
hellcreek->led_sync_good.max_brightness = 1;
@@ -343,16 +352,17 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
ret = of_property_read_string(led, "label", &label);
hellcreek->led_is_gm.name = ret ? "is_gm" : label;
- ret = of_property_read_string(led, "default-state", &state);
- if (!ret) {
- if (!strcmp(state, "on"))
- hellcreek->led_is_gm.brightness = 1;
- else if (!strcmp(state, "off"))
- hellcreek->led_is_gm.brightness = 0;
- else if (!strcmp(state, "keep"))
- hellcreek->led_is_gm.brightness =
- hellcreek_get_brightness(hellcreek,
- STATUS_OUT_IS_GM);
+ state = led_init_default_state_get(of_fwnode_handle(led));
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ hellcreek->led_is_gm.brightness = 1;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+ break;
+ default:
+ hellcreek->led_is_gm.brightness = 0;
}
hellcreek->led_is_gm.max_brightness = 1;
@@ -366,8 +376,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
- led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
- led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register sync_good LED\n");
+ goto out;
+ }
+
+ ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to register is_gm LED\n");
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ goto out;
+ }
ret = 0;
@@ -404,7 +424,7 @@ int hellcreek_ptp_setup(struct hellcreek *hellcreek)
hellcreek->ptp_clock_info.pps = 0;
hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
- hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.gettimex64 = hellcreek_ptp_gettimex;
hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
diff --git a/drivers/net/dsa/ks8995.c b/drivers/net/dsa/ks8995.c
new file mode 100644
index 000000000000..77d8b842693c
--- /dev/null
+++ b/drivers/net/dsa/ks8995.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2025 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bits.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <net/dsa.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+
+#define KS8995_GC0_P5_PHY BIT(3) /* Port 5 PHY enabled */
+
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+
+#define KS8995_GC2_HUGE BIT(2) /* Huge packet support */
+#define KS8995_GC2_LEGAL BIT(1) /* Legal size override */
+
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+
+#define KS8995_GC4_10BT BIT(4) /* Force switch to 10Mbit */
+#define KS8995_GC4_MII_FLOW BIT(5) /* MII full-duplex flow control enable */
+#define KS8995_GC4_MII_HD BIT(6) /* MII half-duplex mode enable */
+
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_GC9_SPECIAL BIT(0) /* Special tagging mode (DSA) */
+
+/* In DSA the ports 1-4 are numbered 0-3 and the CPU port is port 4 */
+#define KS8995_REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
+#define KS8995_REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */
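+
+/* Worked example (illustrative): the Port Control 2 register of DSA port 0
+ * (front panel port 1) is KS8995_REG_PC(0, KS8995_REG_PC2) =
+ * 0x10 + 0x10 * 0 + 0x02 = 0x12.
+ */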
+
+#define KS8995_REG_PC0 0x00 /* Port Control 0 */
+#define KS8995_REG_PC1 0x01 /* Port Control 1 */
+#define KS8995_REG_PC2 0x02 /* Port Control 2 */
+#define KS8995_REG_PC3 0x03 /* Port Control 3 */
+#define KS8995_REG_PC4 0x04 /* Port Control 4 */
+#define KS8995_REG_PC5 0x05 /* Port Control 5 */
+#define KS8995_REG_PC6 0x06 /* Port Control 6 */
+#define KS8995_REG_PC7 0x07 /* Port Control 7 */
+#define KS8995_REG_PC8 0x08 /* Port Control 8 */
+#define KS8995_REG_PC9 0x09 /* Port Control 9 */
+#define KS8995_REG_PC10 0x0a /* Port Control 10 */
+#define KS8995_REG_PC11 0x0b /* Port Control 11 */
+#define KS8995_REG_PC12 0x0c /* Port Control 12 */
+#define KS8995_REG_PC13 0x0d /* Port Control 13 */
+
+#define KS8995_PC0_TAG_INS BIT(2) /* Enable tag insertion on port */
+#define KS8995_PC0_TAG_REM BIT(1) /* Enable tag removal on port */
+#define KS8995_PC0_PRIO_EN BIT(0) /* Enable priority handling */
+
+#define KS8995_PC2_TXEN BIT(2) /* Enable TX on port */
+#define KS8995_PC2_RXEN BIT(1) /* Enable RX on port */
+#define KS8995_PC2_LEARN_DIS BIT(0) /* Disable learning on port */
+
+#define KS8995_PC13_TXDIS BIT(6) /* Disable transmitter */
+#define KS8995_PC13_PWDN BIT(3) /* Power down */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KSZ8864_REG_ID1 0xfe /* Chip ID in bit 7 */
+
+#define KS8995_REGS_SIZE 0x80
+#define KSZ8864_REGS_SIZE 0x100
+#define KSZ8795_REGS_SIZE 0x100
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define FAMILY_KSZ8795 0x87
+#define CHIPID_M 0
+#define KS8995_CHIP_ID 0x00
+#define KSZ8864_CHIP_ID 0x01
+#define KSZ8795_CHIP_ID 0x09
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_CPU_PORT 4
+#define KS8995_NUM_PORTS 5 /* 5 ports including the CPU port */
+#define KS8995_RESET_DELAY 10 /* usec */
+
+enum ks8995_chip_variant {
+ ks8995,
+ ksz8864,
+ ksz8795,
+ max_variant
+};
+
+struct ks8995_chip_params {
+ char *name;
+ int family_id;
+ int chip_id;
+ int regs_size;
+ int addr_width;
+ int addr_shift;
+};
+
+static const struct ks8995_chip_params ks8995_chip[] = {
+ [ks8995] = {
+ .name = "KS8995MA",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KS8995_CHIP_ID,
+ .regs_size = KS8995_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8864] = {
+ .name = "KSZ8864RMN",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KSZ8864_CHIP_ID,
+ .regs_size = KSZ8864_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8795] = {
+ .name = "KSZ8795CLX",
+ .family_id = FAMILY_KSZ8795,
+ .chip_id = KSZ8795_CHIP_ID,
+ .regs_size = KSZ8795_REGS_SIZE,
+ .addr_width = 12,
+ .addr_shift = 1,
+ },
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct mutex lock;
+ struct gpio_desc *reset_gpio;
+ struct bin_attribute regs_attr;
+ const struct ks8995_chip_params *chip;
+ int revision_id;
+ unsigned int max_mtu[KS8995_NUM_PORTS];
+};
+
+static const struct spi_device_id ks8995_id[] = {
+ {"ks8995", ks8995},
+ {"ksz8864", ksz8864},
+ {"ksz8795", ksz8795},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ks8995_id);
+
+static const struct of_device_id ks8995_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks8995_spi_of_match);
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* create_spi_cmd - create a chip specific SPI command header
+ * @ks: pointer to switch instance
+ * @cmd: SPI command for switch
+ * @address: register address for command
+ *
+ * Different chip families use different bit pattern to address the switches
+ * registers:
+ *
+ * KS8995: 8bit command + 8bit address
+ * KSZ8795: 3bit command + 12bit address + 1bit TR (?)
+ */
+static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd,
+ unsigned address)
+{
+ u16 result = cmd;
+
+ /* make room for address (incl. address shift) */
+ result <<= ks->chip->addr_width + ks->chip->addr_shift;
+ /* add address */
+ result |= address << ks->chip->addr_shift;
+ /* SPI protocol needs big endian */
+ return cpu_to_be16(result);
+}
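+
+/* Worked example (illustrative): a KS8995 read of register 0x11 is encoded
+ * as (KS8995_CMD_READ << 8) | 0x11 = 0x0311 on the wire, while the same
+ * read on a KSZ8795 is (KS8995_CMD_READ << 13) | (0x11 << 1) = 0x6022.
+ */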
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ __be16 cmd;
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset);
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = &cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return ks8995_read(ks, buf, addr, 1) != 1;
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return ks8995_write(ks, &buf, addr, 1) != 1;
+}
+
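+/* Illustrative usage (hypothetical caller): both chip ID registers can be
+ * fetched in one burst read:
+ *
+ * char id[2];
+ *
+ * ks8995_read(ks, id, KS8995_REG_ID0, sizeof(id));
+ */
+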
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ks8995_get_revision - get chip revision
+ * @ks: pointer to switch instance
+ *
+ * Verify chip family and id and get chip revision.
+ */
+static int ks8995_get_revision(struct ks8995_switch *ks)
+{
+ int err;
+ u8 id0, id1, ksz8864_id;
+
+ /* read family id */
+ err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify family id */
+ if (id0 != ks->chip->family_id) {
+ dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n",
+ ks->chip->family_id, id0);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ switch (ks->chip->family_id) {
+ case FAMILY_KS8995:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify chip id */
+ if ((get_chip_id(id1) == CHIPID_M) &&
+ (get_chip_id(id1) == ks->chip->chip_id)) {
+ /* KS8995MA */
+ ks->revision_id = get_chip_rev(id1);
+ } else if (get_chip_id(id1) != CHIPID_M) {
+ /* KSZ8864RMN */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if ((ksz8864_id & 0x80) &&
+ (ks->chip->chip_id == KSZ8864_CHIP_ID)) {
+ ks->revision_id = get_chip_rev(id1);
+ }
+
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ case FAMILY_KSZ8795:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if (get_chip_id(id1) == ks->chip->chip_id) {
+ ks->revision_id = get_chip_rev(id1);
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ default:
+ dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0);
+ err = -ENODEV;
+ break;
+ }
+err_out:
+ return err;
+}
+
+static int ks8995_check_config(struct ks8995_switch *ks)
+{
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC0, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "port 5 PHY %senabled\n",
+ (val & KS8995_GC0_P5_PHY) ? "" : "not ");
+
+ val |= KS8995_GC0_P5_PHY;
+ ret = ks8995_write_reg(ks, KS8995_REG_GC0, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC0\n");
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC0 to 0x%02x\n", val);
+
+ return 0;
+}
+
+static void
+ks8995_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+ks8995_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+ int ret;
+ u8 val;
+
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link up on CPU port (%d)\n", port);
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC4, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC4\n");
+ return;
+ }
+
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ dev_dbg(ks->dev, "set switch MII to 10Mbit mode\n");
+ val |= KS8995_GC4_10BT;
+ break;
+ case SPEED_100:
+ default:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val &= ~KS8995_GC4_10BT;
+ break;
+ }
+
+ if (duplex == DUPLEX_HALF) {
+ dev_dbg(ks->dev, "set switch MII to half duplex\n");
+ val |= KS8995_GC4_MII_HD;
+ } else {
+ dev_dbg(ks->dev, "set switch MII to full duplex\n");
+ val &= ~KS8995_GC4_MII_HD;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC4 to %02x\n", val);
+
+ /* Enable the CPU port */
+ ret = ks8995_write_reg(ks, KS8995_REG_GC4, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC4\n");
+}
+
+static void
+ks8995_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+}
+
+static const struct phylink_mac_ops ks8995_phylink_mac_ops = {
+ .mac_config = ks8995_mac_config,
+ .mac_link_up = ks8995_mac_link_up,
+ .mac_link_down = ks8995_mac_link_down,
+};
+
+static enum dsa_tag_protocol ks8995_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ /* This switch actually uses the 6 byte KS8995 protocol */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int ks8995_setup(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static int ks8995_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "enable port %d\n", port);
+
+ return 0;
+}
+
+static void ks8995_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "disable port %d\n", port);
+}
+
+static int ks8995_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ks8995_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+
+ if (flags.val & BR_LEARNING)
+ val &= ~KS8995_PC2_LEARN_DIS;
+ else
+ val |= KS8995_PC2_LEARN_DIS;
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ks8995_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ /* Set the bits for the different STP states in accordance with
+ * the datasheet, pages 36-37 "Spanning tree support".
+ */
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val |= KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_LEARNING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_FORWARDING:
+ val |= KS8995_PC2_TXEN;
+ val |= KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ default:
+ dev_err(ks->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_PC2 for port %d to %02x\n", port, val);
+}
+
+static void ks8995_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
+ if (port == KS8995_CPU_PORT)
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+
+ if (port <= 3) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
+/* Huge packet support up to 1916 byte packets "inclusive", which means
+ * that tags are included. If the bit is not set, the limit is 1536 bytes
+ * "inclusive". We present the length without tags or ethernet headers.
+ * The setting affects all ports.
+ */
+static int ks8995_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ks8995_switch *ks = ds->priv;
+ unsigned int max_mtu;
+ int ret;
+ u8 val;
+ int i;
+
+ ks->max_mtu[port] = new_mtu;
+
+ /* The switch-wide limit follows the largest per-port setting: the
+ * biggest MTU configured on any one port becomes the MTU for the
+ * whole switch.
+ */
+ max_mtu = ETH_DATA_LEN;
+ for (i = 0; i < KS8995_NUM_PORTS; i++) {
+ if (ks->max_mtu[i] > max_mtu)
+ max_mtu = ks->max_mtu[i];
+ }
+
+ /* Translate to layer 2 size.
+ * Add the ethernet header, a possible VLAN tag and the checksum.
+ * For ETH_DATA_LEN (1500 bytes) this will add up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC2, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC2\n");
+ return ret;
+ }
+
+ if (max_mtu <= 1522) {
+ val &= ~KS8995_GC2_HUGE;
+ val &= ~KS8995_GC2_LEGAL;
+ } else if (max_mtu > 1522 && max_mtu <= 1536) {
+ /* This accepts packets up to 1536 bytes */
+ val &= ~KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ } else {
+ /* This accepts packets up to 1916 bytes */
+ val |= KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ }
+
+ dev_dbg(ks->dev, "new max MTU %d bytes (inclusive)\n", max_mtu);
+
+ ret = ks8995_write_reg(ks, KS8995_REG_GC2, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC2\n");
+
+ return ret;
+}
+
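+/* Worked example (illustrative): an MTU of 1500 maps to 1500 + 18 + 4 =
+ * 1522 bytes on the wire, so neither GC2 bit gets set; an MTU of 1600 maps
+ * to 1622 bytes, which exceeds 1536 and enables huge packet mode.
+ */
+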
+static int ks8995_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 1916 - ETH_HLEN - ETH_FCS_LEN;
+}
+
+static const struct dsa_switch_ops ks8995_ds_ops = {
+ .get_tag_protocol = ks8995_get_tag_protocol,
+ .setup = ks8995_setup,
+ .port_pre_bridge_flags = ks8995_port_pre_bridge_flags,
+ .port_bridge_flags = ks8995_port_bridge_flags,
+ .port_enable = ks8995_port_enable,
+ .port_disable = ks8995_port_disable,
+ .port_stp_state_set = ks8995_port_stp_state_set,
+ .port_change_mtu = ks8995_change_mtu,
+ .port_max_mtu = ks8995_get_max_mtu,
+ .phylink_get_caps = ks8995_phylink_get_caps,
+};
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ int err;
+ int variant = spi_get_device_id(spi)->driver_data;
+
+ if (variant >= max_variant) {
+ dev_err(&spi->dev, "bad chip variant %d\n", variant);
+ return -ENODEV;
+ }
+
+ ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return -ENOMEM;
+
+ mutex_init(&ks->lock);
+ ks->spi = spi;
+ ks->dev = &spi->dev;
+ ks->chip = &ks8995_chip[variant];
+
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
+ }
+
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
+
+ if (ks->reset_gpio) {
+ /*
+ * If a reset line was obtained, wait for 100us after
+ * de-asserting RESET before accessing any registers, see
+ * the KS8995MA datasheet, page 44.
+ */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ udelay(100);
+ }
+
+ spi_set_drvdata(spi, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ return err;
+ }
+
+ err = ks8995_get_revision(ks);
+ if (err)
+ return err;
+
+ err = ks8995_reset(ks);
+ if (err)
+ return err;
+
+ dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
+ ks->chip->name, ks->chip->chip_id, ks->revision_id);
+
+ err = ks8995_check_config(ks);
+ if (err)
+ return err;
+
+ ks->ds = devm_kzalloc(&spi->dev, sizeof(*ks->ds), GFP_KERNEL);
+ if (!ks->ds)
+ return -ENOMEM;
+
+ ks->ds->dev = &spi->dev;
+ ks->ds->num_ports = KS8995_NUM_PORTS;
+ ks->ds->ops = &ks8995_ds_ops;
+ ks->ds->phylink_mac_ops = &ks8995_phylink_mac_ops;
+ ks->ds->priv = ks;
+
+ err = dsa_register_switch(ks->ds);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "unable to register DSA switch\n");
+
+ return 0;
+}
+
+static void ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(ks->ds);
+ /* assert reset */
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
+}
+
+/* ------------------------------------------------------------------------ */
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .of_match_table = ks8995_spi_of_match,
+ },
+ .probe = ks8995_probe,
+ .remove = ks8995_remove,
+ .id_table = ks8995_id,
+};
+
+module_spi_driver(ks8995_driver);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 89f920289ae2..d246f95d57ec 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -6,14 +6,20 @@
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/mii.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
+/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */
+#define IS_PORT_XMII(port) ((port) == 0)
+
#define LAN9303_NUM_PORTS 3
/* 13.2 System Control and Status Registers
@@ -21,6 +27,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -31,6 +41,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -44,6 +55,9 @@
#define LAN9303_MANUAL_FC_1 0x68
#define LAN9303_MANUAL_FC_2 0x69
#define LAN9303_MANUAL_FC_0 0x6a
+# define LAN9303_BP_EN BIT(6)
+# define LAN9303_RX_FC_EN BIT(2)
+# define LAN9303_TX_FC_EN BIT(1)
#define LAN9303_SWITCH_CSR_DATA 0x6b
#define LAN9303_SWITCH_CSR_CMD 0x6c
#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
@@ -219,6 +233,13 @@ const struct regmap_access_table lan9303_register_set = {
};
EXPORT_SYMBOL(lan9303_register_set);
+/* Flow Control registers indexed by port number */
+static unsigned int flow_ctl_reg[] = {
+ LAN9303_MANUAL_FC_0,
+ LAN9303_MANUAL_FC_1,
+ LAN9303_MANUAL_FC_2
+};
+
static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
{
int ret, i;
@@ -819,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
if (!chip->reset_gpio)
return;
+ gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
if (chip->reset_duration != 0)
msleep(chip->reset_duration);
@@ -844,21 +867,44 @@ static int lan9303_disable_processing(struct lan9303 *chip)
static int lan9303_check_device(struct lan9303 *chip)
{
int ret;
+ int err;
u32 reg;
+	/* In I2C-managed configurations this polling loop will clash with the
+	 * switch's reading of the EEPROM right after reset, and this behaviour
+	 * is not configurable. While lan9303_read() already has quite a long
+	 * retry timeout, it seems not all cases are detected as an arbitration
+	 * error.
+	 *
+	 * According to the datasheet, the EEPROM loader has a 30ms timeout (in
+	 * case of a missing EEPROM).
+ *
+ * Loading of the largest supported EEPROM is expected to take at least
+ * 5.9s.
+ */
+ err = read_poll_timeout(lan9303_read, ret,
+ !ret && reg & LAN9303_HW_CFG_READY,
+ 20000, 6000000, false,
+ chip->regmap, LAN9303_HW_CFG, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ if (err) {
+ dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
+ return err;
+ }
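[Editor's sketch, not part of the patch.] The read_poll_timeout() call above has two independent failure paths: 'ret' carries the accessor's own return code while 'err' reports the poll timeout, which is why both are checked. A minimal open-coded equivalent under the same 20ms interval / 6s budget:

	unsigned long timeout = jiffies + usecs_to_jiffies(6000000);

	do {
		ret = lan9303_read(chip->regmap, LAN9303_HW_CFG, &reg);
		if (!ret && (reg & LAN9303_HW_CFG_READY))
			break;			/* condition met, no timeout */
		usleep_range(20000, 40000);
	} while (time_before(jiffies, timeout));
	/* read_poll_timeout() yields -ETIMEDOUT in 'err' when the loop expires */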
+
ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -874,7 +920,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -899,6 +945,7 @@ static int lan9303_setup(struct dsa_switch *ds)
{
struct lan9303 *chip = ds->priv;
int ret;
+ u32 reg;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
@@ -906,6 +953,17 @@ static int lan9303_setup(struct dsa_switch *ds)
return -EINVAL;
}
+ /* Virtual Phy: Remove Turbo 200Mbit mode */
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &reg);
+ if (ret)
+		return ret;
+
+ /* Clear the TURBO Mode bit if it was set. */
+ if (reg & LAN9303_VIRT_SPECIAL_TURBO) {
+ reg &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg);
+ }
+
ret = lan9303_setup_tagging(chip);
if (ret)
dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
@@ -958,7 +1016,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
- { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
@@ -978,15 +1036,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
static void lan9303_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
+ u8 *buf = data;
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
- for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
- strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
- ETH_GSTRING_LEN);
- }
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++)
+ ethtool_puts(&buf, lan9303_mib[u].name);
}
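[Editor's note.] ethtool_puts() from <linux/ethtool.h> copies one stats name and advances the cursor by ETH_GSTRING_LEN, replacing the manual strncpy()-plus-offset idiom deleted above. Conceptually it behaves like this sketch (the real helper lives in the ethtool core):

	static void ethtool_puts_sketch(u8 **data, const char *str)
	{
		strscpy((char *)*data, str, ETH_GSTRING_LEN);	/* bounded, NUL-terminated copy */
		*data += ETH_GSTRING_LEN;			/* advance to the next string slot */
	}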
static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1002,9 +1059,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
ret = lan9303_read_switch_port(
chip, port, lan9303_mib[u].offset, &reg);
- if (ret)
+ if (ret) {
dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
port, lan9303_mib[u].offset);
+ reg = 0;
+ }
data[u] = reg;
}
}
@@ -1017,98 +1076,70 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(lan9303_mib);
}
-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_read(chip, regnum);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_read(chip, phy, regnum);
+ return chip->ops->phy_read(chip, phy_base + port, regnum);
}
-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_write(chip, regnum, val);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_write(chip, phy, regnum, val);
-}
-
-static void lan9303_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct lan9303 *chip = ds->priv;
- int ctl;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
-
- ctl = lan9303_phy_read(ds, port, MII_BMCR);
-
- ctl &= ~BMCR_ANENABLE;
-
- if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
- else if (phydev->speed == SPEED_10)
- ctl &= ~BMCR_SPEED100;
- else
- dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- else
- ctl &= ~BMCR_FULLDPLX;
-
- lan9303_phy_write(ds, port, MII_BMCR, ctl);
-
- if (port == chip->phy_addr_base) {
- /* Virtual Phy: Remove Turbo 200Mbit mode */
- lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
-
- ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
- regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
- }
+ return chip->ops->phy_write(chip, phy_base + port, regnum, val);
}
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
- if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) {
+ if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
lan9303_bridge_ports(chip);
chip->is_bridged = true; /* unleash stp_state_set() */
}
@@ -1117,7 +1148,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
}
static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct lan9303 *chip = ds->priv;
@@ -1180,26 +1211,23 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
return lan9303_alr_add_port(chip, addr, port, false);
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
- if (vid)
- return -EOPNOTSUPP;
lan9303_alr_del_port(chip, addr, port);
return 0;
@@ -1237,7 +1265,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
@@ -1252,7 +1281,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1265,32 +1295,114 @@ static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
return 0;
}
+static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct lan9303 *chip = ds->priv;
+
+	dev_dbg(chip->dev, "%s(%d) entered.\n", __func__, port);
+
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+}
+
+static void lan9303_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void lan9303_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void lan9303_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct lan9303 *chip = dp->ds->priv;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ u32 ctl;
+ u32 reg;
+
+ /* On this device, we are only interested in doing something here if
+	 * this is the xMII port. All other ports are 10/100 PHYs using MDIO
+	 * to control their link settings.
+ */
+ if (!IS_PORT_XMII(port))
+ return;
+
+ /* Disable auto-negotiation and force the speed/duplex settings. */
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+ ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ /* Force the flow control settings. */
+ lan9303_read(chip->regmap, flow_ctl_reg[port], &reg);
+ reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN);
+ if (rx_pause)
+ reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN);
+ if (tx_pause)
+ reg |= LAN9303_TX_FC_EN;
+ regmap_write(chip->regmap, flow_ctl_reg[port], reg);
+}
+
+static const struct phylink_mac_ops lan9303_phylink_mac_ops = {
+ .mac_config = lan9303_phylink_mac_config,
+ .mac_link_down = lan9303_phylink_mac_link_down,
+ .mac_link_up = lan9303_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops lan9303_switch_ops = {
- .get_tag_protocol = lan9303_get_tag_protocol,
- .setup = lan9303_setup,
- .get_strings = lan9303_get_strings,
- .phy_read = lan9303_phy_read,
- .phy_write = lan9303_phy_write,
- .adjust_link = lan9303_adjust_link,
- .get_ethtool_stats = lan9303_get_ethtool_stats,
- .get_sset_count = lan9303_get_sset_count,
- .port_enable = lan9303_port_enable,
- .port_disable = lan9303_port_disable,
- .port_bridge_join = lan9303_port_bridge_join,
- .port_bridge_leave = lan9303_port_bridge_leave,
- .port_stp_state_set = lan9303_port_stp_state_set,
- .port_fast_age = lan9303_port_fast_age,
- .port_fdb_add = lan9303_port_fdb_add,
- .port_fdb_del = lan9303_port_fdb_del,
- .port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_add = lan9303_port_mdb_add,
- .port_mdb_del = lan9303_port_mdb_del,
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .phylink_get_caps = lan9303_phylink_get_caps,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
{
- int base;
-
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
@@ -1299,8 +1411,8 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
- base = chip->phy_addr_base;
- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
+ chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
return dsa_register_switch(chip->ds);
}
@@ -1309,7 +1421,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
@@ -1337,6 +1449,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1347,6 +1460,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+	/* First read from the device. This is a dummy read to ensure MDIO
+	 * access is in 32-bit sync.
+	 */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
@@ -1373,7 +1499,6 @@ int lan9303_remove(struct lan9303 *chip)
/* assert reset to the whole device to prevent it from doing anything */
gpiod_set_value_cansleep(chip->reset_gpio, 1);
- gpiod_unexport(chip->reset_gpio);
return 0;
}
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..c62d27cdc117 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -29,8 +29,7 @@ static const struct regmap_config lan9303_i2c_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static int lan9303_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lan9303_i2c_probe(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev;
int ret;
@@ -65,18 +64,14 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return 0;
}
-static int lan9303_i2c_remove(struct i2c_client *client)
+static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return 0;
+ return;
lan9303_remove(&sw_dev->chip);
-
- i2c_set_clientdata(client, NULL);
-
- return 0;
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
@@ -94,7 +89,7 @@ static void lan9303_i2c_shutdown(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
static const struct i2c_device_id lan9303_i2c_id[] = {
- { "lan9303", 0 },
+ { "lan9303" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
@@ -108,7 +103,7 @@ MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
static struct i2c_driver lan9303_i2c_driver = {
.driver = {
.name = "LAN9303_I2C",
- .of_match_table = of_match_ptr(lan9303_i2c_of_match),
+ .of_match_table = lan9303_i2c_of_match,
},
.probe = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
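[Editor's note.] The probe conversion above follows the I2C core's switch to single-argument probe callbacks. A driver that still needs the matched table entry can look it up explicitly; the function name below is illustrative only:

	static int example_i2c_probe(struct i2c_client *client)
	{
		/* only needed if the driver actually uses the matched id */
		const struct i2c_device_id *id = i2c_client_get_device_id(client);

		/* ... allocate state and register, as lan9303_i2c_probe() does ... */
		return 0;
	}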
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..0ac4857e5ee8 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
reg <<= 2; /* reg num to offset */
- mutex_lock(&sw_dev->device->bus->mdio_lock);
+ mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
*val = lan9303_mdio_real_read(sw_dev->device, reg);
*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -58,19 +58,19 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
return 0;
}
-static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int addr, int reg,
u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
+ return mdiobus_write_nested(sw_dev->device->bus, addr, reg, val);
}
-static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int addr, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
+ return mdiobus_read_nested(sw_dev->device->bus, addr, reg);
}
static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
@@ -165,7 +164,7 @@ MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
static struct mdio_driver lan9303_mdio_driver = {
.mdiodrv.driver = {
.name = "LAN9303_MDIO",
- .of_match_table = of_match_ptr(lan9303_mdio_of_match),
+ .of_match_table = lan9303_mdio_of_match,
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
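[Editor's note.] The mutex_lock_nested() changes above are needed because the switch registers are reached through raw accesses on the same MDIO bus that PHY drivers already lock; without the MDIO_MUTEX_NESTED class annotation, lockdep would flag the inner acquisition of mdio_lock as a potential self-deadlock. The pattern mirrors what mdiobus_read_nested()/mdiobus_write_nested() do internally:

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
	/* raw bus accesses that may nest inside another mdio_lock holder */
	mutex_unlock(&bus->mdio_lock);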
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
new file mode 100644
index 000000000000..4a9771be5d58
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -0,0 +1,24 @@
+config NET_DSA_LANTIQ_COMMON
+ tristate
+ select REGMAP
+
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ select NET_DSA_LANTIQ_COMMON
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
+
+config NET_DSA_MXL_GSW1XX
+ tristate "MaxLinear GSW1xx Ethernet switch support"
+ select NET_DSA_TAG_MXL_GSW1XX
+ select NET_DSA_LANTIQ_COMMON
+ help
+	  This enables support for the MaxLinear GSW1xx family of 1GE switches:
+ GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X
+ GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
+ GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X
+ GSW141 6 port, 4 PHYs, RGMII & SGMII
+ GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature
diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile
new file mode 100644
index 000000000000..85fce605310b
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_LANTIQ_COMMON) += lantiq_gswip_common.o
+obj-$(CONFIG_NET_DSA_MXL_GSW1XX) += mxl-gsw1xx.o
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
new file mode 100644
index 000000000000..57dd063c0740
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+#include "lantiq_pce.h"
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include <net/dsa.h>
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ case 1:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+}
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] __maybe_unused = {
+ { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
+ { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
+ {},
+};
+
+static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
+{
+ struct device *dev = priv->dev;
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ size_t size;
+ int ret;
+
+ ret = clk_prepare_enable(gphy_fw->clk_gate);
+ if (ret)
+ return ret;
+
+ reset_control_assert(gphy_fw->reset);
+
+ /* The vendor BSP uses a 200ms delay after asserting the reset line.
+	 * Without this, some users have observed that the PHY does not come
+	 * up on the MDIO bus.
+ */
+ msleep(200);
+
+ ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
+ gphy_fw->fw_name);
+
+ /* GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address.
+ */
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
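[Editor's worked example, addresses illustrative.] Over-allocating by XRX200_GPHY_FW_ALIGN (16 KiB) guarantees an aligned start somewhere inside the buffer, and the DMA and CPU views are rounded up consistently:

	/* dma_addr = 0x04802000 returned by dmam_alloc_coherent();
	 * ALIGN(0x04802000, 0x4000) == 0x04804000, the next 16 KiB boundary.
	 * PTR_ALIGN() applies the same rounding to fw_addr, so the memcpy()
	 * destination matches the address programmed into the RCU regmap
	 * below.
	 */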
+
+ release_firmware(fw);
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(gphy_fw->reset);
+
+ return ret;
+}
+
+static int gswip_gphy_fw_probe(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw,
+ struct device_node *gphy_fw_np, int i)
+{
+ struct device *dev = priv->dev;
+ u32 gphy_mode;
+ int ret;
+ char gphyname[10];
+
+ snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
+
+ gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
+ if (IS_ERR(gphy_fw->clk_gate)) {
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
+ "Failed to lookup gate clock\n");
+ }
+
+ ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
+ gphy_mode);
+ }
+
+ gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
+
+ return gswip_gphy_fw_load(priv, gphy_fw);
+}
+
+static void gswip_gphy_fw_remove(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw)
+{
+ int ret;
+
+ /* check if the device was fully probed */
+ if (!gphy_fw->fw_name)
+ return;
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
+ if (ret)
+ dev_err(priv->dev, "can not reset GPHY FW pointer\n");
+
+ clk_disable_unprepare(gphy_fw->clk_gate);
+
+ reset_control_put(gphy_fw->reset);
+}
+
+static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ struct device_node *gphy_fw_list_np, u32 version)
+{
+ struct device *dev = priv->dev;
+ struct device_node *gphy_fw_np;
+ const struct of_device_id *match;
+ int err;
+ int i = 0;
+
+ /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
+ * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
+ * needs a different GPHY firmware.
+ */
+ if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
+ break;
+ case GSWIP_VERSION_2_1:
+ priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n",
+ version);
+ }
+ }
+
+ match = of_match_node(xway_gphy_match, gphy_fw_list_np);
+ if (match && match->data)
+ priv->gphy_fw_name_cfg = match->data;
+
+ if (!priv->gphy_fw_name_cfg)
+ return dev_err_probe(dev, -ENOENT,
+ "GPHY compatible type not supported\n");
+
+ priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
+ if (!priv->num_gphy_fw)
+ return -ENOENT;
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
+ sizeof(*priv->gphy_fw),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!priv->gphy_fw)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+ if (err) {
+ of_node_put(gphy_fw_np);
+ goto remove_gphy;
+ }
+ i++;
+ }
+
+ /* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+ msleep(300);
+
+ return 0;
+
+remove_gphy:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_SDMA_PCTRLp(6),
+};
+
+static const struct regmap_config mdio_regmap_config = {
+ .name = "mdio",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MDIO_PHYp(0),
+};
+
+static const struct regmap_config mii_regmap_config = {
+ .name = "mii",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_shift = REGMAP_UPSHIFT(2),
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+ .max_register = GSWIP_MII_CFGp(6),
+};
+
+static int gswip_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *gphy_fw_np;
+ __iomem void *gswip, *mdio, *mii;
+ struct device *dev = &pdev->dev;
+ struct gswip_priv *priv;
+ int err;
+ int i;
+ u32 version;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ gswip = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gswip))
+ return PTR_ERR(gswip);
+
+ mdio = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mdio))
+ return PTR_ERR(mdio);
+
+ mii = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(mii))
+ return PTR_ERR(mii);
+
+ priv->gswip = devm_regmap_init_mmio(dev, gswip, &sw_regmap_config);
+ if (IS_ERR(priv->gswip))
+ return PTR_ERR(priv->gswip);
+
+ priv->mdio = devm_regmap_init_mmio(dev, mdio, &mdio_regmap_config);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->mii = devm_regmap_init_mmio(dev, mii, &mii_regmap_config);
+ if (IS_ERR(priv->mii))
+ return PTR_ERR(priv->mii);
+
+ priv->hw_info = of_device_get_match_data(dev);
+ if (!priv->hw_info)
+ return -EINVAL;
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ regmap_read(priv->gswip, GSWIP_VERSION, &version);
+
+ np = dev->of_node;
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ case GSWIP_VERSION_2_1:
+ if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
+ return -EINVAL;
+ break;
+ case GSWIP_VERSION_2_2:
+ case GSWIP_VERSION_2_2_ETC:
+ if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
+ !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
+ return -EINVAL;
+ break;
+ default:
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n", version);
+ }
+
+ /* bring up the mdio bus */
+ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
+ if (gphy_fw_np) {
+ err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ of_node_put(gphy_fw_np);
+ if (err)
+ return dev_err_probe(dev, err,
+ "gphy fw probe failed\n");
+ }
+
+ err = gswip_probe_common(priv, version);
+ if (err)
+ goto gphy_fw_remove;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+gphy_fw_remove:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static void gswip_remove(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ if (!priv)
+ return;
+
+ /* disable the switch */
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+}
+
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct gswip_hw_info gswip_xrx200 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(1) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct gswip_hw_info gswip_xrx300 = {
+ .max_ports = 7,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
+};
+
+static const struct of_device_id gswip_of_match[] = {
+ { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
+ { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gswip_of_match);
+
+static struct platform_driver gswip_driver = {
+ .probe = gswip_probe,
+ .remove = gswip_remove,
+ .shutdown = gswip_shutdown,
+ .driver = {
+ .name = "gswip",
+ .of_match_table = gswip_of_match,
+ },
+};
+
+module_platform_driver(gswip_driver);
+
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
new file mode 100644
index 000000000000..9c38e51a75e8
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __LANTIQ_GSWIP_H
+#define __LANTIQ_GSWIP_H
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/swab.h>
+#include <net/dsa.h>
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+#define GSWIP_MII_PCDU_TXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_TXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_RXDLY_MASK)
+#define GSWIP_MII_PCDU_RXDLY_DEFAULT 2000 /* picoseconds */
+#define GSWIP_MII_PCDU_TXDLY_DEFAULT 2000 /* picoseconds */
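[Editor's note.] The PCDU delay fields count in 500 ps steps, so the 2000 ps defaults encode as 2000 / 500 = 4:

	/* GSWIP_MII_PCDU_TXDLY(2000) == u16_encode_bits(4, GENMASK(2, 0)) */
	/* GSWIP_MII_PCDU_RXDLY(2000) == u16_encode_bits(4, GENMASK(9, 7)) */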
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_REV(v) FIELD_GET(GSWIP_VERSION_REV_MASK, v)
+#define GSWIP_VERSION_MOD(v) FIELD_GET(GSWIP_VERSION_MOD_MASK, v)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+/* The hardware has the 'major/minor' version bytes in the wrong order
+ * preventing numerical comparisons. Swap the bytes of the 16-bit value
+ * to end up with REV being the most significant byte and MOD being the
+ * least significant byte, which then allows comparing it with the
+ * value stored in struct gswip_priv.
+ */
+#define GSWIP_VERSION_GE(priv, ver) ((priv)->version >= swab16(ver))
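[Editor's worked example.] The raw VERSION values do not sort numerically (GSWIP_VERSION_2_2_ETC = 0x022 is smaller than GSWIP_VERSION_2_0 = 0x100 although the silicon is newer), but after swab16() the REV byte becomes most significant and the ordering is restored:

	/* swab16(0x100) == 0x0001	GSWIP 2.0	*/
	/* swab16(0x021) == 0x2100	GSWIP 2.1	*/
	/* swab16(0x022) == 0x2200	GSWIP 2.2 "ETC"	*/
	/* swab16(0x122) == 0x2201	GSWIP 2.2	*/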
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define  GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD	BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+/* Ethernet Switch PCE Port Control Register 3 */
+#define GSWIP_PCE_PCTRL_3p(p) (0x483 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_3_LNDIS BIT(15) /* Learning Disable */
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VINR GENMASK(2, 1) /* VLAN Ingress Tag Rule */
+#define GSWIP_PCE_VCTRL_VINR_ALL 0 /* Admit tagged and untagged packets */
+#define GSWIP_PCE_VCTRL_VINR_TAGGED 1 /* Admit only tagged packets */
+#define GSWIP_PCE_VCTRL_VINR_UNTAGGED 2 /* Admit only untagged packets */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN		BIT(3)	/* Maximum Untagged Frame Length */
+#define GSWIP_MAC_CTRL_4p(p) (0x907 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_4_LPIEN BIT(7) /* LPI Mode Enable */
+#define GSWIP_MAC_CTRL_4_GWAIT_MASK GENMASK(14, 8) /* LPI Wait Time 1G */
+#define GSWIP_MAC_CTRL_4_GWAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_GWAIT_MASK)
+#define GSWIP_MAC_CTRL_4_WAIT_MASK GENMASK(6, 0) /* LPI Wait Time 100M */
+#define GSWIP_MAC_CTRL_4_WAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_WAIT_MASK)
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID BIT(1) /* Valid bit */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+#define GSWIP_VLAN_UNAWARE_PVID 0
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+struct gswip_hw_info {
+ int max_ports;
+ unsigned int allowed_cpu_ports;
+ unsigned int mii_ports;
+ int mii_port_reg_offset;
+ bool supports_2500m;
+ const struct gswip_pce_microcode (*pce_microcode)[];
+ size_t pce_microcode_size;
+ enum dsa_tag_protocol tag_protocol;
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ struct regmap *gswip;
+ struct regmap *mdio;
+ struct regmap *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ struct mutex pce_table_lock;
+ u16 version;
+};
+
+void gswip_disable_switch(struct gswip_priv *priv);
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version);
+
+#endif /* __LANTIQ_GSWIP_H */
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
new file mode 100644
index 000000000000..9da39edf8f57
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel / MaxLinear GSWIP common function library
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible table entries for bridges with one VLAN
+ * ID, one flow ID and a list of ports for each bridge. All entries which
+ * match the same flow ID are combined in the MAC learning table and act
+ * as one global bridge.
+ * The hardware does not support VLAN filtering per port, only per
+ * bridge; this driver converts the DSA model to the hardware.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule, and the CPU port is also added to all bridges. This makes it
+ * possible to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port, which is used when the port is not part of an
+ * explicit bridge. This prevents frames from being forwarded between
+ * all LAN ports by default.
+ */
+
+#include "lantiq_gswip.h"
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+struct gswip_pce_table_entry {
+ u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
+ u16 table; // PCE_TBL_CTRL.ADDR = pData->table
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
+};
+
+struct gswip_rmon_cnt_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+
+static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
+ /** Receive Packet Count (only packets that are accepted and not discarded). */
+ MIB_DESC(1, 0x1F, "RxGoodPkts"),
+ MIB_DESC(1, 0x23, "RxUnicastPkts"),
+ MIB_DESC(1, 0x22, "RxMulticastPkts"),
+ MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
+ MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
+ MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
+ MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
+ MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
+ MIB_DESC(1, 0x20, "RxGoodPausePkts"),
+ MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
+ MIB_DESC(1, 0x12, "Rx64BytePkts"),
+ MIB_DESC(1, 0x13, "Rx127BytePkts"),
+ MIB_DESC(1, 0x14, "Rx255BytePkts"),
+ MIB_DESC(1, 0x15, "Rx511BytePkts"),
+ MIB_DESC(1, 0x16, "Rx1023BytePkts"),
+ /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x17, "RxMaxBytePkts"),
+ MIB_DESC(1, 0x18, "RxDroppedPkts"),
+ MIB_DESC(1, 0x19, "RxFilteredPkts"),
+ MIB_DESC(2, 0x24, "RxGoodBytes"),
+ MIB_DESC(2, 0x26, "RxBadBytes"),
+ MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
+ MIB_DESC(1, 0x0C, "TxGoodPkts"),
+ MIB_DESC(1, 0x06, "TxUnicastPkts"),
+ MIB_DESC(1, 0x07, "TxMulticastPkts"),
+ MIB_DESC(1, 0x00, "Tx64BytePkts"),
+ MIB_DESC(1, 0x01, "Tx127BytePkts"),
+ MIB_DESC(1, 0x02, "Tx255BytePkts"),
+ MIB_DESC(1, 0x03, "Tx511BytePkts"),
+ MIB_DESC(1, 0x04, "Tx1023BytePkts"),
+ /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x05, "TxMaxBytePkts"),
+ MIB_DESC(1, 0x08, "TxSingleCollCount"),
+ MIB_DESC(1, 0x09, "TxMultCollCount"),
+ MIB_DESC(1, 0x0A, "TxLateCollCount"),
+ MIB_DESC(1, 0x0B, "TxExcessCollCount"),
+ MIB_DESC(1, 0x0D, "TxPauseCount"),
+ MIB_DESC(1, 0x10, "TxDroppedPkts"),
+ MIB_DESC(2, 0x0E, "TxGoodBytes"),
+};
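[Editor's note; an assumption, since the counter-reading code is outside this hunk.] Descriptors with .size == 2 (the byte counters) presumably span two consecutive 32-bit RMON words starting at .offset, which a reader would combine along these lines:

	u64 bytes = lo | ((u64)hi << 32);	/* assumed: low word at .offset, high word at .offset + 1 */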
+
+static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
+ u32 cleared)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->gswip, offset, val,
+ !(val & cleared), 20, 50000);
+}
+
+static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set,
+ int port)
+{
+ int reg_port;
+
+ /* MII_CFG register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask,
+ set);
+}
+
+static int gswip_mdio_poll(struct gswip_priv *priv)
+{
+ u32 ctrl;
+
+ return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl,
+ !(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000);
+}
+
+static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val);
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ return 0;
+}
+
+static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
+{
+ struct gswip_priv *priv = bus->priv;
+ u32 val;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ regmap_write(priv->mdio, GSWIP_MDIO_CTRL,
+ GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK));
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int gswip_mdio(struct gswip_priv *priv)
+{
+ struct device_node *mdio_np, *switch_np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ int err = 0;
+
+ mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
+ if (!mdio_np)
+ mdio_np = of_get_child_by_name(switch_np, "mdio");
+
+ if (!of_device_is_available(mdio_np))
+ goto out_put_node;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus) {
+ err = -ENOMEM;
+ goto out_put_node;
+ }
+
+ bus->priv = priv;
+ bus->read = gswip_mdio_rd;
+ bus->write = gswip_mdio_wr;
+ bus->name = "lantiq,xrx200-mdio";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ err = devm_of_mdiobus_register(dev, bus, mdio_np);
+
+out_put_node:
+ of_node_put(mdio_np);
+
+ return err;
+}
+
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u32 tmp;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_BAS,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->key[i] = tmp;
+ }
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++) {
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp);
+ if (err)
+ goto out_unlock;
+ tbl->val[i] = tmp;
+ }
+
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp);
+ if (err)
+ goto out_unlock;
+
+ tbl->mask = tmp;
+ err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl);
+ if (err)
+ goto out_unlock;
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+out_unlock:
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u32 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ mutex_lock(&priv->pce_table_lock);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
+ return err;
+ }
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]);
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode);
+
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask);
+
+ regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &ctrl);
+ ctrl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ ctrl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ ctrl |= GSWIP_PCE_TBL_CTRL_VLD;
+ ctrl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ ctrl |= GSWIP_PCE_TBL_CTRL_BAS;
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, ctrl);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
+}
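+
+/* Illustrative sketch (hypothetical helper, not used by this driver): how a
+ * caller drives gswip_pce_table_entry_read() to fetch an Active VLAN entry.
+ * For that table, key[0] holds the VID and val[0] the FID (see
+ * gswip_vlan_active_create() below); error handling mirrors the other users.
+ */
+static int __maybe_unused gswip_example_read_active_vlan_fid(struct gswip_priv *priv,
+ u16 index)
+{
+ struct gswip_pce_table_entry tbl = {0,};
+ int err;
+
+ tbl.index = index;
+ tbl.table = GSWIP_TABLE_ACTIVE_VLAN;
+ err = gswip_pce_table_entry_read(priv, &tbl);
+ if (err)
+ return err;
+
+ /* key[0] now holds the VID, val[0] the FID of the entry */
+ return tbl.valid ? tbl.val[0] : -ENOENT;
+}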
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ int err;
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID;
+ vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_set_learning(struct gswip_priv *priv, int port,
+ bool enable)
+{
+ if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return -EOPNOTSUPP;
+
+ /* learning disable bit */
+ return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port),
+ GSWIP_PCE_PCTRL_3_LNDIS,
+ enable ? 0 : GSWIP_PCE_PCTRL_3_LNDIS);
+}
+
+static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+ unsigned long supported = 0;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ supported |= BR_LEARNING;
+
+ if (flags.mask & ~supported)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gswip_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (flags.mask & BR_LEARNING)
+ return gswip_port_set_learning(priv, port,
+ !!(flags.val & BR_LEARNING));
+
+ return 0;
+}
+
+static int gswip_port_setup(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_ADDR_MASK,
+ mdio_phy);
+ }
+
+ /* RMON Counter Enable for port */
+ regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN);
+
+ /* enable port fetch/store dma & VLAN Modification */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH);
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+
+ return 0;
+}
+
+static void gswip_port_disable(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port),
+ GSWIP_FDMA_PCTRL_EN);
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+}
+
+static int gswip_pce_load_microcode(struct gswip_priv *priv)
+{
+ int i;
+ int err;
+
+ regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR,
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0);
+
+ for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0),
+ (*priv->hw_info->pce_microcode)[i].val_0);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1),
+ (*priv->hw_info->pce_microcode)[i].val_1);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2),
+ (*priv->hw_info->pce_microcode)[i].val_2);
+ regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3),
+ (*priv->hw_info->pce_microcode)[i].val_3);
+
+ /* start the table access: */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+ }
+
+ /* tell the switch that the microcode is loaded */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MC_VALID);
+
+ return 0;
+}
+
+static void gswip_port_commit_pvid(struct gswip_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u32 vinr;
+ int idx;
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (br) {
+ u16 pvid = GSWIP_VLAN_UNAWARE_PVID;
+
+ if (br_vlan_enabled(br))
+ br_vlan_get_pvid(br, &pvid);
+
+ /* VLAN-aware bridge ports with no PVID will use Active VLAN
+ * index 0. The expectation is that this drops all untagged and
+ * VID-0 tagged ingress traffic.
+ */
+ idx = 0;
+ for (int i = priv->hw_info->max_ports;
+ i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == br &&
+ priv->vlans[i].vid == pvid) {
+ idx = i;
+ break;
+ }
+ }
+ } else {
+ /* The Active VLAN table index as configured by
+ * gswip_add_single_port_br()
+ */
+ idx = port + 1;
+ }
+
+ vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED;
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VINR,
+ FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr));
+
+ /* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed
+ * directly instead of referencing the index in the Active VLAN Table.
+ * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457), even
+ * GSWIP 2.2 and newer hardware maintains the GSWIP 2.1 behavior.
+ */
+ regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx);
+}
+
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan_filtering) {
+ /* Use tag based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_VSR |
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0,
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0);
+ regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ } else {
+ /* Use port based VLAN */
+ regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port),
+ GSWIP_PCE_VCTRL_UVR |
+ GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR |
+ GSWIP_PCE_VCTRL_VID0 |
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_VSR);
+ regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_TVM);
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp,
+ phy_interface_t interface)
+{
+ u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT;
+ u32 rx_delay = GSWIP_MII_PCDU_RXDLY_DEFAULT;
+ struct device_node *port_dn = dp->dn;
+ u16 mii_pcdu_reg;
+
+ /* As MII_PCDU registers only exist for MII ports, silently return
+ * unless the port is an MII port
+ */
+ if (!(priv->hw_info->mii_ports & BIT(dp->index)))
+ return;
+
+ switch (dp->index + priv->hw_info->mii_port_reg_offset) {
+ case 0:
+ mii_pcdu_reg = GSWIP_MII_PCDU0;
+ break;
+ case 1:
+ mii_pcdu_reg = GSWIP_MII_PCDU1;
+ break;
+ case 5:
+ mii_pcdu_reg = GSWIP_MII_PCDU5;
+ break;
+ default:
+ return;
+ }
+
+ /* legacy code to set default delays according to the interface mode */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ tx_delay = 0;
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rx_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tx_delay = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* allow overriding the default delays via device tree properties */
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ regmap_write_bits(priv->mii, mii_pcdu_reg,
+ GSWIP_MII_PCDU_TXDLY_MASK |
+ GSWIP_MII_PCDU_RXDLY_MASK,
+ GSWIP_MII_PCDU_TXDLY(tx_delay) |
+ GSWIP_MII_PCDU_RXDLY(rx_delay));
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+ unsigned int cpu_ports = dsa_cpu_ports(ds);
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int err, i;
+
+ regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0);
+ usleep_range(5000, 10000);
+ regmap_write(priv->gswip, GSWIP_SWRES, 0);
+
+ /* disable port fetch/store dma on all ports */
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
+ gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false, NULL);
+ }
+
+ /* enable Switch */
+ regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+
+ err = gswip_pce_load_microcode(priv);
+ if (err) {
+ dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
+ return err;
+ }
+
+ /* Default unknown Broadcast/Multicast/Unicast port maps */
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports);
+ regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports);
+
+ /* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030,
+ * have an interoperability problem with this auto polling mechanism
+ * because their status registers report the link in a different state
+ * than it actually is. The AR8030 has the BMSR_ESTATEN bit set as
+ * well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL, which makes the
+ * auto polling state machine consider the link to be negotiated at
+ * 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY, this
+ * leaves the switch port completely dead (neither RX nor TX works).
+ * With various other PHY / port combinations (PHY11G GPHY, PHY22F
+ * GPHY, external RGMII PEF7071/7072) traffic would also stop:
+ * sometimes it would work fine for a few minutes to hours and then
+ * stop, on other devices no traffic could be sent or received at all.
+ * Testing shows that these problems go away when PHY auto polling is
+ * disabled.
+ */
+ regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0);
+
+ /* Configure the MDIO Clock 2.5 MHz */
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09);
+
+ /* bring up the mdio bus */
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(priv->dev, "mdio bus setup failed\n");
+ return err;
+ }
+
+ /* Disable the xMII interface and clear its isolation bit */
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+ 0, i);
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ /* enable special tag insertion on cpu port */
+ regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index),
+ GSWIP_FDMA_PCTRL_STEN);
+
+ /* accept special tag in ingress direction */
+ regmap_set_bits(priv->gswip,
+ GSWIP_PCE_PCTRL_0p(cpu_dp->index),
+ GSWIP_PCE_PCTRL_0_INGRESS);
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL,
+ GSWIP_BM_QUEUE_GCTRL_GL_MOD);
+
+ /* VLAN aware Switching */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_VLAN);
+
+ /* Flush MAC Table */
+ regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
+
+ ds->mtu_enforcement_ingress = true;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ return priv->hw_info->tag_protocol;
+}
+
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge,
+ int port, u16 vid, bool untagged, bool pvid,
+ bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
+ bool active_vlan_created = false;
+ int fid = -1, idx = -1;
+ int i, err;
+
+ /* Check if there is already an entry for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (vlan_aware) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ }
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= cpu_ports;
+ vlan_mapping.val[1] |= BIT(port);
+ if (vlan_aware)
+ vlan_mapping.val[2] |= cpu_ports;
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
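+ /* Worked example (hypothetical numbers): adding VID 100 untagged on
+ * port 2 with the CPU port on port 6 yields val[0] = 100, val[1] with
+ * BIT(2) and BIT(6) set (port membership) and val[2] with BIT(2)
+ * cleared (egress untagged).
+ */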
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already an entry for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n",
+ port, vid, bridge ? bridge->name : "(null)");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ /* In case all ports are removed from the bridge, remove the VLAN */
+ if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ gswip_port_commit_pvid(priv, port);
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* Set up the VLAN for VLAN-unaware bridging for this port, and remove
+ * it from the "single-port bridge" through which it was operating as
+ * standalone.
+ */
+ err = gswip_vlan_add(priv, br, port, GSWIP_VLAN_UNAWARE_PVID,
+ true, true, false);
+ if (err)
+ return err;
+
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct net_device *br = bridge.dev;
+ struct gswip_priv *priv = ds->priv;
+
+ /* Add the port back to the "single-port bridge", and remove it from
+ * the VLAN-unaware PVID created for this bridge.
+ */
+ gswip_add_single_port_br(priv, port, true);
+ gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int pos = max_ports;
+ int i, idx = -1;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ /* Check if there is already an entry for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vlan->vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this VLAN is not programmed yet, reserve a free entry in the
+ * VLAN table, starting the search after the port-based entries.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ int err;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+ err = gswip_port_vlan_prepare(ds, port, vlan, extack);
+ if (err)
+ return err;
+
+ /* The CPU port must receive all packets, so we do not apply any
+ * VLAN filtering to it here. This callback is also invoked with a
+ * NULL bridge, in which case we would not know which bridge to
+ * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid,
+ true);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
+ struct gswip_priv *priv = ds->priv;
+
+ if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID)
+ return 0;
+
+ /* The CPU port must receive all packets, so we do not apply any
+ * VLAN filtering to it here. This callback is also invoked with a
+ * NULL bridge, in which case we would not know which bridge to
+ * configure this for.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return gswip_vlan_remove(priv, bridge, port, vlan->vid);
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
+ continue;
+
+ if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0]))
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port),
+ GSWIP_SDMA_PCTRL_EN);
+ regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port),
+ GSWIP_PCE_PCTRL_0_PSTATE_MASK,
+ stp_state);
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ struct net_device *bridge, const unsigned char *addr,
+ u16 vid, bool add)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int fid = -1;
+ int i;
+ int err;
+
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "no FID found for bridge %s\n",
+ bridge->name);
+ return -EINVAL;
+ }
+
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
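+ /* e.g. addr 00:11:22:33:44:55 packs as key[0] = 0x4455,
+ * key[1] = 0x2233, key[2] = 0x0011
+ */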
+ mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC))
+ mac_bridge.val[1] = add ? (GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC |
+ GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0;
+ else
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
+
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ if (db.type != DSA_DB_BRIDGE)
+ return -EOPNOTSUPP;
+
+ return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[ETH_ALEN];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
+ if (mac_bridge.val[0] & BIT(port)) {
+ err = cb(addr, 0, true, data);
+ if (err)
+ return err;
+ }
+ } else {
+ if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0])) {
+ err = cb(addr, 0, false, data);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ /* The CPU port always has the maximum MTU of all user ports, so use
+ * it to set the switch frame size, including the 8 byte special
+ * header.
+ */
+ if (dsa_is_cpu_port(ds, port)) {
+ new_mtu += 8;
+ regmap_write(priv->gswip, GSWIP_MAC_FLEN,
+ VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN);
+ }
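+
+ /* e.g. a standard 1500 byte MTU on the CPU port programs
+ * GSWIP_MAC_FLEN to 18 + (1500 + 8) + 4 = 1530 bytes
+ * (VLAN_ETH_HLEN + MTU + special tag + ETH_FCS_LEN)
+ */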
+
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+ else
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port),
+ GSWIP_MAC_CTRL_2_MLEN);
+
+ return 0;
+}
+
+static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ priv->hw_info->phylink_get_caps(ds, port, config);
+}
+
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+ u32 mdio_phy;
+
+ if (link)
+ mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+ else
+ mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_LINK_MASK, mdio_phy);
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+ phy_interface_t interface)
+{
+ u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+ switch (speed) {
+ case SPEED_10:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_100:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ mii_cfg = GSWIP_MII_CFG_RATE_M50;
+ else
+ mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+ break;
+
+ case SPEED_1000:
+ mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+ mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+ break;
+ }
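+
+ /* The GSWIP_MII_CFG_RATE_* macro names suggest the xMII clock rates
+ * in MHz: 2.5 MHz for 10 Mbit/s, 25 MHz for 100 Mbit/s, 125 MHz for
+ * 1000 Mbit/s, and a 50 MHz reference clock for RMII at any speed.
+ */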
+
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy);
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0);
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (duplex == DUPLEX_FULL) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+ mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy);
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mac_ctrl_0, mdio_phy;
+
+ if (tx_pause && rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else if (tx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ } else if (rx_pause) {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_EN;
+ } else {
+ mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+ mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+ GSWIP_MDIO_PHY_FCONRX_DIS;
+ }
+
+ regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port),
+ GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0);
+ regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port),
+ GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK,
+ mdio_phy);
+}
+
+static void gswip_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+ u32 miicfg = 0;
+
+ miicfg |= GSWIP_MII_CFG_LDCLKDIS;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIM;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIP;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+ if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out"))
+ miicfg |= GSWIP_MII_CFG_RMII_CLK;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ miicfg |= GSWIP_MII_CFG_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ miicfg |= GSWIP_MII_CFG_MODE_GMII;
+ break;
+ default:
+ dev_err(dp->ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+
+ gswip_mii_mask_cfg(priv,
+ GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+ GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+ miicfg, port);
+
+ gswip_mii_delay_setup(priv, dp, state->interface);
+}
+
+static void gswip_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+ if (!dsa_port_is_cpu(dp))
+ gswip_port_set_link(priv, port, false);
+}
+
+static void gswip_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
+
+ if (!dsa_port_is_cpu(dp) || interface != PHY_INTERFACE_MODE_INTERNAL) {
+ gswip_port_set_link(priv, port, true);
+ gswip_port_set_speed(priv, port, speed, interface);
+ gswip_port_set_duplex(priv, port, duplex);
+ gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+ }
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port);
+}
+
+static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
+ ethtool_puts(&data, gswip_rmon_cnt[i].name);
+}
+
+static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
+ u32 index)
+{
+ u32 result, val;
+ int err;
+
+ regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index);
+ regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD |
+ GSWIP_BM_RAM_CTRL_BAS,
+ table | GSWIP_BM_RAM_CTRL_BAS);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_BAS);
+ if (err) {
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
+ table, index);
+ return 0;
+ }
+
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result);
+ regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val);
+ result |= val << 16;
+
+ return result;
+}
+
+static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ const struct gswip_rmon_cnt_desc *rmon_cnt;
+ int i;
+ u64 high;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
+ rmon_cnt = &gswip_rmon_cnt[i];
+
+ data[i] = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset);
+ if (rmon_cnt->size == 2) {
+ high = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset + 1);
+ data[i] |= high << 32;
+ }
+ }
+}
+
+static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(gswip_rmon_cnt);
+}
+
+static int gswip_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *e)
+{
+ if (e->tx_lpi_timer > 0x7f)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN);
+}
+
+static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index),
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT_MASK |
+ GSWIP_MAC_CTRL_4_WAIT_MASK,
+ GSWIP_MAC_CTRL_4_LPIEN |
+ GSWIP_MAC_CTRL_4_GWAIT(timer) |
+ GSWIP_MAC_CTRL_4_WAIT(timer));
+}
+
+static bool gswip_support_eee(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2))
+ return true;
+
+ return false;
+}
+
+static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+
+ if (priv->hw_info->mac_select_pcs)
+ return priv->hw_info->mac_select_pcs(config, interface);
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_disable_tx_lpi = gswip_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = gswip_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = gswip_phylink_mac_select_pcs,
+};
+
+static const struct dsa_switch_ops gswip_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_setup = gswip_port_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .port_pre_bridge_flags = gswip_port_pre_bridge_flags,
+ .port_bridge_flags = gswip_port_bridge_flags,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_phylink_get_caps,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+ .set_mac_eee = gswip_set_mac_eee,
+ .support_eee = gswip_support_eee,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+void gswip_disable_switch(struct gswip_priv *priv)
+{
+ regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE);
+}
+EXPORT_SYMBOL_GPL(gswip_disable_switch);
+
+static int gswip_validate_cpu_port(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int cpu_port = -1;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ if (cpu_port != -1)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "only a single CPU port is supported\n");
+
+ cpu_port = cpu_dp->index;
+ }
+
+ if (cpu_port == -1)
+ return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
+
+ if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "unsupported CPU port defined\n");
+
+ return 0;
+}
+
+int gswip_probe_common(struct gswip_priv *priv, u32 version)
+{
+ int err;
+
+ mutex_init(&priv->pce_table_lock);
+
+ priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = priv->dev;
+ priv->ds->num_ports = priv->hw_info->max_ports;
+ priv->ds->ops = &gswip_switch_ops;
+ priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
+ priv->ds->priv = priv;
+
+ /* The hardware has the 'major/minor' version bytes in the wrong order,
+ * preventing numerical comparisons. Construct a 16-bit unsigned integer
+ * with the REV field as the most significant byte and the MOD field as
+ * the least significant byte. This effectively swaps the two bytes of
+ * the version value but, unlike swab16(), does not modify the source
+ * variable.
+ */
+ priv->version = GSWIP_VERSION_REV(version) << 8 |
+ GSWIP_VERSION_MOD(version);
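+ /* e.g. (illustrative values) GSWIP_VERSION_REV(version) = 0x22 and
+ * GSWIP_VERSION_MOD(version) = 0x1a result in priv->version = 0x221a
+ */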
+
+ err = dsa_register_switch(priv->ds);
+ if (err)
+ return dev_err_probe(priv->dev, err, "dsa switch registration failed\n");
+
+ err = gswip_validate_cpu_port(priv->ds);
+ if (err)
+ goto disable_switch;
+
+ dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n",
+ GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
+
+ return 0;
+
+disable_switch:
+ gswip_disable_switch(priv);
+ dsa_unregister_switch(priv->ds);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(gswip_probe_common);
+
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq/lantiq_pce.h
index e2be31f3672a..659f9a0638d9 100644
--- a/drivers/net/dsa/lantiq_pce.h
+++ b/drivers/net/dsa/lantiq/lantiq_pce.h
@@ -7,6 +7,8 @@
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
+#include "lantiq_gswip.h"
+
enum {
OUT_MAC0 = 0,
OUT_MAC1,
@@ -74,13 +76,6 @@ enum {
FLAG_NO, /*13*/
};
-struct gswip_pce_microcode {
- u16 val_3;
- u16 val_2;
- u16 val_1;
- u16 val_0;
-};
-
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
new file mode 100644
index 000000000000..0816c61a47f1
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* DSA Driver for MaxLinear GSW1xx switch devices
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "lantiq_gswip.h"
+#include "mxl-gsw1xx.h"
+#include "mxl-gsw1xx_pce.h"
+
+struct gsw1xx_priv {
+ struct mdio_device *mdio_dev;
+ int smdio_badr;
+ struct regmap *sgmii;
+ struct regmap *gpio;
+ struct regmap *clk;
+ struct regmap *shell;
+ struct phylink_pcs pcs;
+ phy_interface_t tbi_interface;
+ struct gswip_priv gswip;
+};
+
+static int gsw1xx_config_smdio_badr(struct gsw1xx_priv *priv,
+ unsigned int reg)
+{
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr = priv->smdio_badr;
+ int res;
+
+ if (smdio_badr == GSW1XX_SMDIO_BADR_UNKNOWN ||
+ reg - smdio_badr >= GSW1XX_SMDIO_BADR ||
+ smdio_badr > reg) {
+ /* Configure the Switch Base Address */
+ smdio_badr = reg & ~GENMASK(3, 0);
+ res = __mdiobus_write(bus, sw_addr, GSW1XX_SMDIO_BADR, smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, configuring switch base\n",
+ __func__, res);
+ return res;
+ }
+ priv->smdio_badr = smdio_badr;
+ }
+
+ return smdio_badr;
+}
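+
+/* Worked example: accessing register 0xf486 with an unset base address first
+ * programs GSW1XX_SMDIO_BADR with 0xf480 (0xf486 & ~GENMASK(3, 0)); the
+ * regmap callbacks below then read or write MDIO register
+ * 0xf486 - 0xf480 = 0x6. Later accesses within the same 16-register window
+ * reuse the cached base address.
+ */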
+
+static int gsw1xx_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_read(bus, sw_addr, reg - smdio_badr);
+ if (res < 0) {
+ dev_err(&priv->mdio_dev->dev, "%s: Error %d reading 0x%x\n",
+ __func__, res, reg);
+ return res;
+ }
+
+ *val = res;
+
+ return 0;
+}
+
+static int gsw1xx_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct gsw1xx_priv *priv = context;
+ struct mii_bus *bus = priv->mdio_dev->bus;
+ int sw_addr = priv->mdio_dev->addr;
+ int smdio_badr;
+ int res;
+
+ smdio_badr = gsw1xx_config_smdio_badr(priv, reg);
+ if (smdio_badr < 0)
+ return smdio_badr;
+
+ res = __mdiobus_write(bus, sw_addr, reg - smdio_badr, val);
+ if (res < 0)
+ dev_err(&priv->mdio_dev->dev,
+ "%s: Error %d, writing 0x%x:0x%x\n", __func__, res, reg,
+ val);
+
+ return res;
+}
+
+static const struct regmap_bus gsw1xx_regmap_bus = {
+ .reg_write = gsw1xx_regmap_write,
+ .reg_read = gsw1xx_regmap_read,
+};
+
+static void gsw1xx_mdio_regmap_lock(void *mdio_lock)
+{
+ mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void gsw1xx_mdio_regmap_unlock(void *mdio_lock)
+{
+ mutex_unlock(mdio_lock);
+}
+
+static unsigned int gsw1xx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static struct gsw1xx_priv *pcs_to_gsw1xx(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct gsw1xx_priv, pcs);
+}
+
+static int gsw1xx_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Deassert SGMII shell reset */
+ return regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+}
+
+static void gsw1xx_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ /* Assert SGMII shell reset */
+ regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+}
+
+static void gsw1xx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_TBISTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->link = !!(val & GSW1XX_SGMII_TBI_TBISTAT_LINK);
+ state->an_complete = !!(val & GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE);
+
+ ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, &val);
+ if (ret < 0)
+ return;
+
+ state->duplex = (val & GSW1XX_SGMII_TBI_LPSTAT_DUPLEX) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX)
+ state->pause |= MLO_PAUSE_RX;
+
+ if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX)
+ state->pause |= MLO_PAUSE_TX;
+
+ switch (FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, val)) {
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII:
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr,
+ u16 data)
+{
+ int ret, val;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_D, data);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_A, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ GSW1XX_SGMII_PHY_WRITE |
+ GSW1XX_SGMII_PHY_RESET_N);
+ if (ret < 0)
+ return ret;
+
+ return regmap_read_poll_timeout(priv->sgmii, GSW1XX_SGMII_PHY_C,
+ val, val & GSW1XX_SGMII_PHY_STATUS,
+ 1000, 100000);
+}
+
+static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv)
+{
+ int ret;
+ u16 val;
+
+ /* Assert and deassert SGMII shell reset */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+ /* Hardware Bringup FSM Enable */
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_HWBU_CTRL,
+ GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM |
+ GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure SGMII PHY Receiver */
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_EQ,
+ GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF) |
+ GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN |
+ GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN |
+ FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT,
+ GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF);
+
+ /* TODO: Take care of inverted RX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL,
+ GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF);
+
+ /* TODO: Take care of inverted TX pair once generic property is
+ * available
+ */
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and Release TBI */
+ val = GSW1XX_SGMII_TBI_TBICTL_INITTBI | GSW1XX_SGMII_TBI_TBICTL_ENTBI |
+ GSW1XX_SGMII_TBI_TBICTL_CRSTRR | GSW1XX_SGMII_TBI_TBICTL_CRSOFF;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+ val &= ~GSW1XX_SGMII_TBI_TBICTL_INITTBI;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val);
+ if (ret < 0)
+ return ret;
+
+ /* Release Tx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+ ret = regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL,
+ GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB);
+ if (ret < 0)
+ return ret;
+
+ /* Release Rx Data Buffers */
+ ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+ if (ret < 0)
+ return ret;
+ return regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL,
+ GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB);
+}
+
+static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 txaneg, anegctl, nco_ctrl;
+ bool reconf = false;
+ int ret = 0;
+
+ /* Do not unnecessarily disrupt the link: skip resetting the hardware
+ * if the PCS has previously been configured successfully for this
+ * interface mode.
+ */
+ if (priv->tbi_interface == interface)
+ reconf = true;
+
+ /* mark PCS configuration as incomplete */
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ if (!reconf)
+ ret = gsw1xx_pcs_reset(priv);
+
+ if (ret)
+ return ret;
+
+ /* Override the bootstrap pin settings:
+ * OVRANEG causes the ANEG mode, ANEG enable and ANEG restart to be
+ * taken from the ANMODE, ANEGEN and RANEG bits of the ANEGCTL
+ * register. OVRABL causes the ability bits in tx_config_reg to be
+ * taken from the TXANEGH and TXANEGL registers.
+ */
+ anegctl = GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG |
+ GSW1XX_SGMII_TBI_ANEGCTL_OVRABL;
+
+ switch (phylink_get_link_timer_ns(interface)) {
+ case 10000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10US);
+ break;
+ case 1600000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS);
+ break;
+ case 5000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS);
+ break;
+ case 10000000:
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
+ GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS);
+ break;
+ default:
+ return -EINVAL;
+ }
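+
+ /* phylink_get_link_timer_ns() returns the link timer for the given
+ * interface mode; 1.6 ms is the value mandated for SGMII and 10 ms
+ * the IEEE 802.3 clause 37 value for 1000BASE-X.
+ */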
+
+ if (neg_mode & PHYLINK_PCS_NEG_INBAND)
+ anegctl |= GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN;
+
+ txaneg = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* lacking a defined reverse-SGMII interface mode this
+ * driver only supports SGMII (MAC side) for now
+ */
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC);
+ txaneg |= ADVERTISE_LPACK;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
+ GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX);
+ } else {
+ dev_err(priv->gswip.dev, "%s: wrong interface mode %s\n",
+ __func__, phy_modes(interface));
+ return -EINVAL;
+ }
+
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGH,
+ FIELD_GET(GENMASK(15, 8), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGL,
+ FIELD_GET(GENMASK(7, 0), txaneg));
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, anegctl);
+ if (ret < 0)
+ return ret;
+
+ if (!reconf) {
+ /* setup SerDes clock speed */
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ nco_ctrl = GSW1XX_SGMII_2G5 | GSW1XX_SGMII_2G5_NCO2;
+ else
+ nco_ctrl = GSW1XX_SGMII_1G | GSW1XX_SGMII_1G_NCO1;
+
+ ret = regmap_update_bits(priv->clk, GSW1XX_CLK_NCO_CTRL,
+ GSW1XX_SGMII_HSP_MASK |
+ GSW1XX_SGMII_SEL,
+ nco_ctrl);
+ if (ret)
+ return ret;
+
+ ret = gsw1xx_pcs_phy_xaui_write(priv, 0x30, 0x80);
+ if (ret)
+ return ret;
+ }
+
+ /* PCS configuration has now been completed, store mode to prevent
+ * disrupting the link in case of future calls of this function for the
+ * same interface mode.
+ */
+ priv->tbi_interface = interface;
+
+ return 0;
+}
+
+static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+
+ regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL,
+ GSW1XX_SGMII_TBI_ANEGCTL_RANEG);
+}
+
+static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs);
+ u16 lpstat;
+
+ /* When in-band AN is enabled hardware will set lpstat */
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ return;
+
+ /* Force speed and duplex settings */
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if (speed == SPEED_10)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_10);
+ else if (speed == SPEED_100)
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_100);
+ else
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000);
+ } else {
+ lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED,
+ GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII);
+ }
+
+ if (duplex == DUPLEX_FULL)
+ lpstat |= GSW1XX_SGMII_TBI_LPSTAT_DUPLEX;
+
+ regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, lpstat);
+}
+
+static const struct phylink_pcs_ops gsw1xx_pcs_ops = {
+ .pcs_inband_caps = gsw1xx_pcs_inband_caps,
+ .pcs_enable = gsw1xx_pcs_enable,
+ .pcs_disable = gsw1xx_pcs_disable,
+ .pcs_get_state = gsw1xx_pcs_get_state,
+ .pcs_config = gsw1xx_pcs_config,
+ .pcs_an_restart = gsw1xx_pcs_an_restart,
+ .pcs_link_up = gsw1xx_pcs_link_up,
+};
+
+static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ switch (port) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+ case 4: /* port 4: SGMII */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ if (priv->hw_info->supports_2500m) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+ }
+ return; /* no support for EEE on SGMII port */
+ case 5: /* port 5: RGMII or RMII */
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
+
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ config->lpi_timer_default = 20;
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+}
+
+static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *gswip_priv = dp->ds->priv;
+ struct gsw1xx_priv *gsw1xx_priv = container_of(gswip_priv,
+ struct gsw1xx_priv,
+ gswip);
+
+ switch (dp->index) {
+ case GSW1XX_SGMII_PORT:
+ return &gsw1xx_priv->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv,
+ const char *name,
+ unsigned int reg_base,
+ unsigned int max_register)
+{
+ const struct regmap_config config = {
+ .name = name,
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_base = reg_base,
+ .max_register = max_register,
+ .lock = gsw1xx_mdio_regmap_lock,
+ .unlock = gsw1xx_mdio_regmap_unlock,
+ .lock_arg = &priv->mdio_dev->bus->mdio_lock,
+ };
+
+ return devm_regmap_init(&priv->mdio_dev->dev, &gsw1xx_regmap_bus,
+ priv, &config);
+}
+
+static int gsw1xx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct gsw1xx_priv *priv;
+ u32 version;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mdio_dev = mdiodev;
+ priv->smdio_badr = GSW1XX_SMDIO_BADR_UNKNOWN;
+
+ priv->gswip.dev = dev;
+ priv->gswip.hw_info = of_device_get_match_data(dev);
+ if (!priv->gswip.hw_info)
+ return -EINVAL;
+
+ priv->gswip.gswip = gsw1xx_regmap_init(priv, "switch",
+ GSW1XX_SWITCH_BASE, 0xfff);
+ if (IS_ERR(priv->gswip.gswip))
+ return PTR_ERR(priv->gswip.gswip);
+
+ priv->gswip.mdio = gsw1xx_regmap_init(priv, "mdio", GSW1XX_MMDIO_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mdio))
+ return PTR_ERR(priv->gswip.mdio);
+
+ priv->gswip.mii = gsw1xx_regmap_init(priv, "mii", GSW1XX_RGMII_BASE,
+ 0xff);
+ if (IS_ERR(priv->gswip.mii))
+ return PTR_ERR(priv->gswip.mii);
+
+ priv->sgmii = gsw1xx_regmap_init(priv, "sgmii", GSW1XX_SGMII_BASE,
+ 0xfff);
+ if (IS_ERR(priv->sgmii))
+ return PTR_ERR(priv->sgmii);
+
+ priv->gpio = gsw1xx_regmap_init(priv, "gpio", GSW1XX_GPIO_BASE, 0xff);
+ if (IS_ERR(priv->gpio))
+ return PTR_ERR(priv->gpio);
+
+ priv->clk = gsw1xx_regmap_init(priv, "clk", GSW1XX_CLK_BASE, 0xff);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->shell = gsw1xx_regmap_init(priv, "shell", GSW1XX_SHELL_BASE,
+ 0xff);
+ if (IS_ERR(priv->shell))
+ return PTR_ERR(priv->shell);
+
+ priv->pcs.ops = &gsw1xx_pcs_ops;
+ priv->pcs.poll = true;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->pcs.supported_interfaces);
+ if (priv->gswip.hw_info->supports_2500m)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ priv->pcs.supported_interfaces);
+ priv->tbi_interface = PHY_INTERFACE_MODE_NA;
+
+ /* assert SGMII reset to power down SGMII unit */
+ ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ,
+ GSW1XX_RST_REQ_SGMII_SHELL);
+ if (ret < 0)
+ return ret;
+
+	/* configure GPIO pin-mux for MMDIO in case an external PHY is
+	 * connected to SGMII or RGMII as slave interface
+	 */
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL0, 3);
+ regmap_set_bits(priv->gpio, GPIO_ALTSEL1, 3);
+
+ ret = regmap_read(priv->gswip.gswip, GSWIP_VERSION, &version);
+ if (ret)
+ return ret;
+
+ ret = gswip_probe_common(&priv->gswip, version);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, &priv->gswip);
+
+ return 0;
+}
+
+static void gsw1xx_remove(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ gswip_disable_switch(priv);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void gsw1xx_shutdown(struct mdio_device *mdiodev)
+{
+ struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+
+ gswip_disable_switch(priv);
+}
+
+static const struct gswip_hw_info gsw12x_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+	.phylink_get_caps = gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw140_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+	.phylink_get_caps = gsw1xx_phylink_get_caps,
+ .supports_2500m = true,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+static const struct gswip_hw_info gsw141_data = {
+ .max_ports = GSW1XX_PORTS,
+ .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT),
+ .mii_ports = BIT(GSW1XX_MII_PORT),
+ .mii_port_reg_offset = -GSW1XX_MII_PORT,
+ .mac_select_pcs = gsw1xx_phylink_mac_select_pcs,
+ .phylink_get_caps = gsw1xx_phylink_get_caps,
+ .pce_microcode = &gsw1xx_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX,
+};
+
+/*
+ * GSW125 is the industrial temperature version of GSW120.
+ * GSW145 is the industrial temperature version of GSW140.
+ */
+static const struct of_device_id gsw1xx_of_match[] = {
+ { .compatible = "maxlinear,gsw120", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw125", .data = &gsw12x_data },
+ { .compatible = "maxlinear,gsw140", .data = &gsw140_data },
+ { .compatible = "maxlinear,gsw141", .data = &gsw141_data },
+ { .compatible = "maxlinear,gsw145", .data = &gsw140_data },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, gsw1xx_of_match);
+
+static struct mdio_driver gsw1xx_driver = {
+ .probe = gsw1xx_probe,
+ .remove = gsw1xx_remove,
+ .shutdown = gsw1xx_shutdown,
+ .mdiodrv.driver = {
+ .name = "mxl-gsw1xx",
+ .of_match_table = gsw1xx_of_match,
+ },
+};
+
+mdio_module_driver(gsw1xx_driver);
+
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx Ethernet switch");
+MODULE_LICENSE("GPL");
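The regmap bus callbacks referenced above (gsw1xx_regmap_bus and the gsw1xx_mdio_regmap_lock/unlock helpers) sit outside this hunk. For orientation, here is a minimal sketch of what an SMDIO-indirect read could look like, assuming the usual scheme of latching the upper address bits through the base-address register (GSW1XX_SMDIO_BADR) before accessing the 5-bit MDIO register window. The bit split and the function name are assumptions for illustration, not the patch's actual implementation:

/* Hypothetical sketch only: assumes reg >> 5 selects the SMDIO base
 * address and reg & 0x1f the register within the current window.
 */
static int gsw1xx_smdio_read_sketch(void *context, unsigned int reg,
				    unsigned int *val)
{
	struct gsw1xx_priv *priv = context;
	struct mii_bus *bus = priv->mdio_dev->bus;
	int addr = priv->mdio_dev->addr;
	u16 badr = reg >> 5;
	int ret;

	/* the regmap lock already holds the MDIO bus lock, so use the
	 * unlocked accessors; re-latch the base address only on change
	 * (priv->smdio_badr starts out as GSW1XX_SMDIO_BADR_UNKNOWN)
	 */
	if (priv->smdio_badr != badr) {
		ret = __mdiobus_write(bus, addr, GSW1XX_SMDIO_BADR, badr);
		if (ret < 0)
			return ret;
		priv->smdio_badr = badr;
	}

	ret = __mdiobus_read(bus, addr, reg & 0x1f);
	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}

Taking the MDIO bus lock as the regmap lock_arg (as the probe code above does) makes the base-address latch plus data access appear atomic to other users of the bus, which is why the unlocked __mdiobus accessors would be the right choice inside such a callback.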
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
new file mode 100644
index 000000000000..38e03c048a26
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Register definitions for MaxLinear GSW1xx series switches
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ */
+#ifndef __MXL_GSW1XX_H
+#define __MXL_GSW1XX_H
+
+#include <linux/bitfield.h>
+
+#define GSW1XX_PORTS 6
+/* Port used for RGMII or optional RMII */
+#define GSW1XX_MII_PORT 5
+/* Port used for SGMII */
+#define GSW1XX_SGMII_PORT 4
+
+#define GSW1XX_SYS_CLK_FREQ 340000000
+
+/* SMDIO switch register base address */
+#define GSW1XX_SMDIO_BADR 0x1f
+#define GSW1XX_SMDIO_BADR_UNKNOWN -1
+
+/* GSW1XX SGMII PCS */
+#define GSW1XX_SGMII_BASE 0xd000
+#define GSW1XX_SGMII_PHY_HWBU_CTRL 0x009
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM BIT(0)
+#define GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN BIT(3)
+#define GSW1XX_SGMII_TBI_TXANEGH 0x300
+#define GSW1XX_SGMII_TBI_TXANEGL 0x301
+#define GSW1XX_SGMII_TBI_ANEGCTL 0x304
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT GENMASK(1, 0)
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10US 0
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN BIT(2)
+#define GSW1XX_SGMII_TBI_ANEGCTL_RANEG BIT(3)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRABL BIT(4)
+#define GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG BIT(5)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE GENMASK(7, 6)
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX 1
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_PHY 2
+#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC 3
+#define GSW1XX_SGMII_TBI_ANEGCTL_BCOMP BIT(15)
+
+#define GSW1XX_SGMII_TBI_TBICTL 0x305
+#define GSW1XX_SGMII_TBI_TBICTL_INITTBI BIT(0)
+#define GSW1XX_SGMII_TBI_TBICTL_ENTBI BIT(1)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSTRR BIT(4)
+#define GSW1XX_SGMII_TBI_TBICTL_CRSOFF BIT(5)
+#define GSW1XX_SGMII_TBI_TBISTAT 0x309
+#define GSW1XX_SGMII_TBI_TBISTAT_LINK BIT(0)
+#define GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT 0x30a
+#define GSW1XX_SGMII_TBI_LPSTAT_DUPLEX BIT(0)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX BIT(1)
+#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX BIT(2)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED GENMASK(6, 5)
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_10 0
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_100 1
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000 2
+#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII 3
+#define GSW1XX_SGMII_PHY_D 0x100
+#define GSW1XX_SGMII_PHY_A 0x101
+#define GSW1XX_SGMII_PHY_C 0x102
+#define GSW1XX_SGMII_PHY_STATUS BIT(0)
+#define GSW1XX_SGMII_PHY_READ BIT(4)
+#define GSW1XX_SGMII_PHY_WRITE BIT(8)
+#define GSW1XX_SGMII_PHY_RESET_N BIT(12)
+#define GSW1XX_SGMII_PCS_RXB_CTL 0x401
+#define GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB BIT(1)
+#define GSW1XX_SGMII_PCS_TXB_CTL 0x404
+#define GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB BIT(1)
+
+#define GSW1XX_SGMII_PHY_RX0_CFG2 0x004
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ GENMASK(2, 0)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF 2
+#define GSW1XX_SGMII_PHY_RX0_CFG2_INVERT BIT(3)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN BIT(4)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN BIT(5)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT GENMASK(12, 6)
+#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF 20
+
+#define GSW1XX_SGMII_PHY_TX0_CFG3 0x007
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_EN BIT(12)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL GENMASK(11, 9)
+#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF 4
+#define GSW1XX_SGMII_PHY_TX0_CFG3_INVERT BIT(8)
+
+/* GSW1XX PDI Registers */
+#define GSW1XX_SWITCH_BASE 0xe000
+
+/* GSW1XX MII Registers */
+#define GSW1XX_RGMII_BASE 0xf100
+
+/* GSW1XX GPIO Registers */
+#define GSW1XX_GPIO_BASE 0xf300
+#define GPIO_ALTSEL0 0x83
+#define GPIO_ALTSEL0_EXTPHY_MUX_VAL 0x03c3
+#define GPIO_ALTSEL1 0x84
+#define GPIO_ALTSEL1_EXTPHY_MUX_VAL 0x003f
+
+/* MDIO bus controller */
+#define GSW1XX_MMDIO_BASE 0xf400
+
+/* generic IC registers */
+#define GSW1XX_SHELL_BASE 0xfa00
+#define GSW1XX_SHELL_RST_REQ 0x01
+#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5)
+/* RGMII PAD Slew Control Register */
+#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78
+#define RGMII_SLEW_CFG_RX_2_5_V BIT(4)
+#define RGMII_SLEW_CFG_TX_2_5_V BIT(5)
+
+/* SGMII clock related settings */
+#define GSW1XX_CLK_BASE 0xf900
+#define GSW1XX_CLK_NCO_CTRL 0x68
+#define GSW1XX_SGMII_HSP_MASK GENMASK(3, 2)
+#define GSW1XX_SGMII_SEL BIT(1)
+#define GSW1XX_SGMII_1G 0x0
+#define GSW1XX_SGMII_2G5 0xc
+#define GSW1XX_SGMII_1G_NCO1 0x0
+#define GSW1XX_SGMII_2G5_NCO2 0x2
+
+#endif /* __MXL_GSW1XX_H */
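Since the header includes <linux/bitfield.h>, the GENMASK()-based fields above are meant to be used with FIELD_PREP()/FIELD_GET(). A short illustration (not taken from the patch) of composing a TBI autoneg control word for MAC-side SGMII with a 1.6 ms link timer:

/* illustrative only; this is not a function from the driver */
static u16 gsw1xx_anegctl_example(void)
{
	u16 val;

	val = FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE,
			 GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC) |
	      FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT,
			 GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS) |
	      GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN;	/* enable autoneg */

	return val;
}

/* decoding works the same way in reverse, e.g. the link partner speed:
 *   speed = FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, lpstat);
 */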
diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
new file mode 100644
index 000000000000..eefcd411a340
--- /dev/null
+++ b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCE microcode update for the MaxLinear GSW1xx switch chip driver
+ *
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ * Copyright (C) 2022 Snap One, LLC. All rights reserved.
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Lantiq Deutschland
+ */
+
+#include "lantiq_gswip.h"
+
+#define INSTR 0
+#define IPV6 1
+#define LENACCU 2
+
+/* GSWIP_2.X */
+enum {
+ OUT_MAC0 = 0,
+ OUT_MAC1,
+ OUT_MAC2,
+ OUT_MAC3,
+ OUT_MAC4,
+ OUT_MAC5,
+ OUT_ETHTYP,
+ OUT_VTAG0,
+ OUT_VTAG1,
+ OUT_ITAG0,
+ OUT_ITAG1, /* 10 */
+ OUT_ITAG2,
+ OUT_ITAG3,
+ OUT_IP0,
+ OUT_IP1,
+ OUT_IP2,
+ OUT_IP3,
+ OUT_SIP0,
+ OUT_SIP1,
+ OUT_SIP2,
+ OUT_SIP3, /* 20 */
+ OUT_SIP4,
+ OUT_SIP5,
+ OUT_SIP6,
+ OUT_SIP7,
+ OUT_DIP0,
+ OUT_DIP1,
+ OUT_DIP2,
+ OUT_DIP3,
+ OUT_DIP4,
+ OUT_DIP5, /* 30 */
+ OUT_DIP6,
+ OUT_DIP7,
+ OUT_SESID,
+ OUT_PROT,
+ OUT_APP0,
+ OUT_APP1,
+ OUT_IGMP0,
+ OUT_IGMP1,
+ OUT_STAG0 = 61,
+ OUT_STAG1 = 62,
+ OUT_NONE = 63,
+};
+
+/* parser's microcode flag type */
+enum {
+ FLAG_ITAG = 0,
+ FLAG_VLAN,
+ FLAG_SNAP,
+ FLAG_PPPOE,
+ FLAG_IPV6,
+ FLAG_IPV6FL,
+ FLAG_IPV4,
+ FLAG_IGMP,
+ FLAG_TU,
+ FLAG_HOP,
+ FLAG_NN1, /* 10 */
+ FLAG_NN2,
+ FLAG_END,
+ FLAG_NO, /* 13 */
+ FLAG_SVLAN, /* 14 */
+};
+
+#define PCE_MC_M(val, msk, ns, out, len, type, flags, ipv4_len) \
+ { (val), (msk), ((ns) << 10 | (out) << 4 | (len) >> 1),\
+ ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
+
+/* V22_2X (IPv6 issue fixed) */
+static const struct gswip_pce_microcode gsw1xx_pce_microcode[] = {
+ /* value mask ns fields L type flags ipv4_len */
+ PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0),
+ PCE_MC_M(0x8100, 0xFFFF, 5, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x88A8, 0xFFFF, 6, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x9100, 0xFFFF, 4, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ PCE_MC_M(0x8864, 0xFFFF, 20, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0800, 0xFFFF, 24, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x86DD, 0xFFFF, 25, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x8863, 0xFFFF, 19, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xF800, 13, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0x0600, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 15, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0xAAAA, 0xFFFF, 17, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0300, 0xFF00, 45, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 21, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
+ PCE_MC_M(0x0021, 0xFFFF, 24, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0057, 0xFFFF, 25, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x4000, 0xF000, 27, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
+ PCE_MC_M(0x6000, 0xF000, 30, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 28, OUT_IP3, 2, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 29, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
+ PCE_MC_M(0x1100, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0600, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x0000, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x00F0, 38, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
+ PCE_MC_M(0x2B00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
+ PCE_MC_M(0x3C00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
+ PCE_MC_M(0x0000, 0x00FC, 44, OUT_PROT, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, IPV6, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 44, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0),
+};
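As a sanity check of the PCE_MC_M() packing, expanding the first row of the table by hand (illustrative arithmetic, not code from the patch):

/* PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0)
 * with OUT_ITAG0 == 9, INSTR == 0 and FLAG_ITAG == 0 expands to:
 *
 *   word2 = (1 << 10) | (9 << 4) | (4 >> 1)                   = 0x0492
 *   word3 = ((4 & 1) << 15) | (0 << 13) | (0 << 9) | (0 << 8) = 0x0000
 *
 * so the row is stored as { 0x88c3, 0xFFFF, 0x0492, 0x0000 }, i.e.
 * value 0x88c3 matched exactly, next state 1, output field OUT_ITAG0,
 * length 4, instruction type, ITAG flag clear.
 */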
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
deleted file mode 100644
index 7056d98d8177..000000000000
--- a/drivers/net/dsa/lantiq_gswip.c
+++ /dev/null
@@ -1,2276 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
- *
- * Copyright (C) 2010 Lantiq Deutschland
- * Copyright (C) 2012 John Crispin <john@phrozen.org>
- * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * The VLAN and bridge model the GSWIP hardware uses does not directly
- * match the model DSA uses.
- *
- * The hardware has 64 possible table entries for bridges with one VLAN
- * ID, one flow ID and a list of ports for each bridge. All entries which
- * match the same flow ID are combined in the MAC learning table; they
- * act as one global bridge.
- * The hardware does not support VLAN filtering on the port, but on the
- * bridge; this driver converts the DSA model to the hardware model.
- *
- * The CPU gets all the exception frames which do not match any forwarding
- * rule and the CPU port is also added to all bridges. This makes it possible
- * to handle all the special cases easily in software.
- * At the initialization the driver allocates one bridge table entry for
- * each switch port which is used when the port is used without an
- * explicit bridge. This prevents the frames from being forwarded
- * between all LAN ports by default.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/firmware.h>
-#include <linux/if_bridge.h>
-#include <linux/if_vlan.h>
-#include <linux/iopoll.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
-#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-
-#include "lantiq_pce.h"
-
-/* GSWIP MDIO Registers */
-#define GSWIP_MDIO_GLOB 0x00
-#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
-#define GSWIP_MDIO_CTRL 0x08
-#define GSWIP_MDIO_CTRL_BUSY BIT(12)
-#define GSWIP_MDIO_CTRL_RD BIT(11)
-#define GSWIP_MDIO_CTRL_WR BIT(10)
-#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
-#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
-#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
-#define GSWIP_MDIO_READ 0x09
-#define GSWIP_MDIO_WRITE 0x0A
-#define GSWIP_MDIO_MDC_CFG0 0x0B
-#define GSWIP_MDIO_MDC_CFG1 0x0C
-#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
-#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
-#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
-#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
-#define GSWIP_MDIO_PHY_LINK_UP 0x2000
-#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
-#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
-#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
-#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
-#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
-#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
-#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
-#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
-#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
-#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
-#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
-#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
-#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
-#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
-#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
-#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
- GSWIP_MDIO_PHY_FCONRX_MASK | \
- GSWIP_MDIO_PHY_FCONTX_MASK | \
- GSWIP_MDIO_PHY_LINK_MASK | \
- GSWIP_MDIO_PHY_SPEED_MASK | \
- GSWIP_MDIO_PHY_FDUP_MASK)
-
-/* GSWIP MII Registers */
-#define GSWIP_MII_CFGp(p) (0x2 * (p))
-#define GSWIP_MII_CFG_RESET BIT(15)
-#define GSWIP_MII_CFG_EN BIT(14)
-#define GSWIP_MII_CFG_ISOLATE BIT(13)
-#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
-#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
-#define GSWIP_MII_CFG_RMII_CLK BIT(7)
-#define GSWIP_MII_CFG_MODE_MIIP 0x0
-#define GSWIP_MII_CFG_MODE_MIIM 0x1
-#define GSWIP_MII_CFG_MODE_RMIIP 0x2
-#define GSWIP_MII_CFG_MODE_RMIIM 0x3
-#define GSWIP_MII_CFG_MODE_RGMII 0x4
-#define GSWIP_MII_CFG_MODE_GMII 0x9
-#define GSWIP_MII_CFG_MODE_MASK 0xf
-#define GSWIP_MII_CFG_RATE_M2P5 0x00
-#define GSWIP_MII_CFG_RATE_M25 0x10
-#define GSWIP_MII_CFG_RATE_M125 0x20
-#define GSWIP_MII_CFG_RATE_M50 0x30
-#define GSWIP_MII_CFG_RATE_AUTO 0x40
-#define GSWIP_MII_CFG_RATE_MASK 0x70
-#define GSWIP_MII_PCDU0 0x01
-#define GSWIP_MII_PCDU1 0x03
-#define GSWIP_MII_PCDU5 0x05
-#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
-#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
-
-/* GSWIP Core Registers */
-#define GSWIP_SWRES 0x000
-#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
-#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
-#define GSWIP_VERSION 0x013
-#define GSWIP_VERSION_REV_SHIFT 0
-#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
-#define GSWIP_VERSION_MOD_SHIFT 8
-#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
-#define GSWIP_VERSION_2_0 0x100
-#define GSWIP_VERSION_2_1 0x021
-#define GSWIP_VERSION_2_2 0x122
-#define GSWIP_VERSION_2_2_ETC 0x022
-
-#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
-#define GSWIP_BM_RAM_ADDR 0x044
-#define GSWIP_BM_RAM_CTRL 0x045
-#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
-#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
-#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_BM_QUEUE_GCTRL 0x04A
-#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
-/* buffer management Port Configuration Register */
-#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
-#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
-#define  GSWIP_BM_PCFG_IGCNT		BIT(1)	/* Ingress Special Tag RMON count */
-/* buffer management Port Control Register */
-#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
-#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
-#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
-
-/* PCE */
-#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
-#define GSWIP_PCE_TBL_MASK 0x448
-#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
-#define GSWIP_PCE_TBL_ADDR 0x44E
-#define GSWIP_PCE_TBL_CTRL 0x44F
-#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
-#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
-#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
-#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
-#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
-#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
-#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
-#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
-#define GSWIP_PCE_GCTRL_0 0x456
-#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
-#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
-#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
-#define GSWIP_PCE_GCTRL_1 0x457
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
-#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
-#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
-#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
-#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
-#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
-#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
-#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
-#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
-#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
-#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
-#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
-#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
-#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
-#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
-#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
-
-#define GSWIP_MAC_FLEN 0x8C5
-#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
-#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
-#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
-#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
-#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
-#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
-#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
-#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
-#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
-#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
-#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
-#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
-#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
-#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_2_MLEN		BIT(3)	/* Maximum Untagged Frame Length */
-
-/* Ethernet Switch Fetch DMA Port Control Register */
-#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
-#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
-#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
-#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-
-/* Ethernet Switch Store DMA Port Control Register */
-#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
-#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
-#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
-
-#define GSWIP_TABLE_ACTIVE_VLAN 0x01
-#define GSWIP_TABLE_VLAN_MAPPING 0x02
-#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_STATIC	0x01	/* Static, non-aging entry */
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-struct gswip_hw_info {
- int max_ports;
- int cpu_port;
- const struct dsa_switch_ops *ops;
-};
-
-struct xway_gphy_match_data {
- char *fe_firmware_name;
- char *ge_firmware_name;
-};
-
-struct gswip_gphy_fw {
- struct clk *clk_gate;
- struct reset_control *reset;
- u32 fw_addr_offset;
- char *fw_name;
-};
-
-struct gswip_vlan {
- struct net_device *bridge;
- u16 vid;
- u8 fid;
-};
-
-struct gswip_priv {
- __iomem void *gswip;
- __iomem void *mdio;
- __iomem void *mii;
- const struct gswip_hw_info *hw_info;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- struct dsa_switch *ds;
- struct device *dev;
- struct regmap *rcu_regmap;
- struct gswip_vlan vlans[64];
- int num_gphy_fw;
- struct gswip_gphy_fw *gphy_fw;
- u32 port_vlan_filter;
- struct mutex pce_table_lock;
-};
-
-struct gswip_pce_table_entry {
- u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
- u16 table; // PCE_TBL_CTRL.ADDR = pData->table
- u16 key[8];
- u16 val[5];
- u16 mask;
- u8 gmap;
- bool type;
- bool valid;
- bool key_mode;
-};
-
-struct gswip_rmon_cnt_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
-
-static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
- /** Receive Packet Count (only packets that are accepted and not discarded). */
- MIB_DESC(1, 0x1F, "RxGoodPkts"),
- MIB_DESC(1, 0x23, "RxUnicastPkts"),
- MIB_DESC(1, 0x22, "RxMulticastPkts"),
- MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
- MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
- MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
- MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
- MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
- MIB_DESC(1, 0x20, "RxGoodPausePkts"),
- MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
- MIB_DESC(1, 0x12, "Rx64BytePkts"),
- MIB_DESC(1, 0x13, "Rx127BytePkts"),
- MIB_DESC(1, 0x14, "Rx255BytePkts"),
- MIB_DESC(1, 0x15, "Rx511BytePkts"),
- MIB_DESC(1, 0x16, "Rx1023BytePkts"),
- /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x17, "RxMaxBytePkts"),
- MIB_DESC(1, 0x18, "RxDroppedPkts"),
- MIB_DESC(1, 0x19, "RxFilteredPkts"),
- MIB_DESC(2, 0x24, "RxGoodBytes"),
- MIB_DESC(2, 0x26, "RxBadBytes"),
- MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
- MIB_DESC(1, 0x0C, "TxGoodPkts"),
- MIB_DESC(1, 0x06, "TxUnicastPkts"),
- MIB_DESC(1, 0x07, "TxMulticastPkts"),
- MIB_DESC(1, 0x00, "Tx64BytePkts"),
- MIB_DESC(1, 0x01, "Tx127BytePkts"),
- MIB_DESC(1, 0x02, "Tx255BytePkts"),
- MIB_DESC(1, 0x03, "Tx511BytePkts"),
- MIB_DESC(1, 0x04, "Tx1023BytePkts"),
- /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
- MIB_DESC(1, 0x05, "TxMaxBytePkts"),
- MIB_DESC(1, 0x08, "TxSingleCollCount"),
- MIB_DESC(1, 0x09, "TxMultCollCount"),
- MIB_DESC(1, 0x0A, "TxLateCollCount"),
- MIB_DESC(1, 0x0B, "TxExcessCollCount"),
- MIB_DESC(1, 0x0D, "TxPauseCount"),
- MIB_DESC(1, 0x10, "TxDroppedPkts"),
- MIB_DESC(2, 0x0E, "TxGoodBytes"),
-};
-
-static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->gswip + (offset * 4));
-}
-
-static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_switch_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_switch_w(priv, val, offset);
-}
-
-static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
- u32 cleared)
-{
- u32 val;
-
- return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
- (val & cleared) == 0, 20, 50000);
-}
-
-static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mdio + (offset * 4));
-}
-
-static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mdio_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mdio_w(priv, val, offset);
-}
-
-static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
-{
- return __raw_readl(priv->mii + (offset * 4));
-}
-
-static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
-{
- __raw_writel(val, priv->mii + (offset * 4));
-}
-
-static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
- u32 offset)
-{
- u32 val = gswip_mii_r(priv, offset);
-
- val &= ~(clear);
- val |= set;
- gswip_mii_w(priv, val, offset);
-}
-
-static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- /* There's no MII_CFG register for the CPU port */
- if (!dsa_is_cpu_port(priv->ds, port))
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
-}
-
-static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
- int port)
-{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
- break;
- }
-}
-
-static int gswip_mdio_poll(struct gswip_priv *priv)
-{
- int cnt = 100;
-
- while (likely(cnt--)) {
- u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
-
- if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
- return 0;
- usleep_range(20, 40);
- }
-
- return -ETIMEDOUT;
-}
-
-static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- return 0;
-}
-
-static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
-{
- struct gswip_priv *priv = bus->priv;
- int err;
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
- ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
- (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
- GSWIP_MDIO_CTRL);
-
- err = gswip_mdio_poll(priv);
- if (err) {
- dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
- return err;
- }
-
- return gswip_mdio_r(priv, GSWIP_MDIO_READ);
-}
-
-static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
-{
- struct dsa_switch *ds = priv->ds;
-
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!ds->slave_mii_bus)
- return -ENOMEM;
-
- ds->slave_mii_bus->priv = priv;
- ds->slave_mii_bus->read = gswip_mdio_rd;
- ds->slave_mii_bus->write = gswip_mdio_wr;
- ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
- snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
- dev_name(priv->dev));
- ds->slave_mii_bus->parent = priv->dev;
- ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
-
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
-}
-
-static int gswip_pce_table_entry_read(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
-
- tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
-
- tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
- tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
- tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
-
- mutex_unlock(&priv->pce_table_lock);
-
- return 0;
-}
-
-static int gswip_pce_table_entry_write(struct gswip_priv *priv,
- struct gswip_pce_table_entry *tbl)
-{
- int i;
- int err;
- u16 crtl;
- u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
-
- mutex_lock(&priv->pce_table_lock);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err) {
- mutex_unlock(&priv->pce_table_lock);
- return err;
- }
-
- gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
- gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
-
- for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
- gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- tbl->table | addr_mode,
- GSWIP_PCE_TBL_CTRL);
-
- gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
-
- crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
- crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
- GSWIP_PCE_TBL_CTRL_GMAP_MASK);
- if (tbl->type)
- crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
- if (tbl->valid)
- crtl |= GSWIP_PCE_TBL_CTRL_VLD;
- crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
- crtl |= GSWIP_PCE_TBL_CTRL_BAS;
- gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
-
- mutex_unlock(&priv->pce_table_lock);
-
- return err;
-}
-
-/* Add the LAN port into a bridge with the CPU port by
- * default. This prevents automatic forwarding of
- * packets between the LAN ports when no explicit
- * bridge is configured.
- */
-static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
- unsigned int max_ports = priv->hw_info->max_ports;
- int err;
-
- if (port >= max_ports) {
- dev_err(priv->dev, "single port for %i supported\n", port);
- return -EIO;
- }
-
- vlan_active.index = port + 1;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = 0; /* vid */
- vlan_active.val[0] = port + 1 /* fid */;
- vlan_active.valid = add;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- if (!add)
- return 0;
-
- vlan_mapping.index = port + 1;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- vlan_mapping.val[0] = 0 /* vid */;
- vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
- vlan_mapping.val[2] = 0;
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static int gswip_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct gswip_priv *priv = ds->priv;
- int err;
-
- if (!dsa_is_user_port(ds, port))
- return 0;
-
- if (!dsa_is_cpu_port(ds, port)) {
- err = gswip_add_single_port_br(priv, port, true);
- if (err)
- return err;
- }
-
- /* RMON Counter Enable for port */
- gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
-
- /* enable port fetch/store dma & VLAN Modification */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
- GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
-
- if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
- if (phydev)
- mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- }
-
- return 0;
-}
-
-static void gswip_port_disable(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
-
- if (!dsa_is_user_port(ds, port))
- return;
-
- gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
- GSWIP_FDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
-}
-
-static int gswip_pce_load_microcode(struct gswip_priv *priv)
-{
- int i;
- int err;
-
- gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
- GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
- GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
- gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
-
- for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
- gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
- gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
- GSWIP_PCE_TBL_VAL(0));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
- GSWIP_PCE_TBL_VAL(1));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
- GSWIP_PCE_TBL_VAL(2));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
- GSWIP_PCE_TBL_VAL(3));
-
- /* start the table access: */
- gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
- GSWIP_PCE_TBL_CTRL);
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
- return err;
- }
-
- /* tell the switch that the microcode is loaded */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
- GSWIP_PCE_GCTRL_0);
-
- return 0;
-}
-
-static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
- struct gswip_priv *priv = ds->priv;
-
- /* Do not allow changing the VLAN filtering options while in bridge */
- if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
- NL_SET_ERR_MSG_MOD(extack,
- "Dynamic toggling of vlan_filtering not supported");
- return -EIO;
- }
-
- if (vlan_filtering) {
- /* Use port based VLAN tag */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
- GSWIP_PCE_PCTRL_0p(port));
- } else {
- /* Use port based VLAN tag */
- gswip_switch_mask(priv,
- GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
- GSWIP_PCE_VCTRL_VEMR,
- GSWIP_PCE_VCTRL_VSR,
- GSWIP_PCE_VCTRL(port));
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
- GSWIP_PCE_PCTRL_0p(port));
- }
-
- return 0;
-}
-
-static int gswip_setup(struct dsa_switch *ds)
-{
- struct gswip_priv *priv = ds->priv;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int i;
- int err;
-
- gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
- usleep_range(5000, 10000);
- gswip_switch_w(priv, 0, GSWIP_SWRES);
-
- /* disable port fetch/store dma on all ports */
- for (i = 0; i < priv->hw_info->max_ports; i++) {
- gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false, NULL);
- }
-
- /* enable Switch */
- gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
-
- err = gswip_pce_load_microcode(priv);
- if (err) {
- dev_err(priv->dev, "writing PCE microcode failed, %i", err);
- return err;
- }
-
- /* Default unknown Broadcast/Multicast/Unicast port maps */
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
-
-	/* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030,
-	 * have an interoperability problem with this auto polling mechanism
-	 * because their status registers report the link being in a different
-	 * state than it actually is. The AR8030 has the BMSR_ESTATEN bit set
-	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL, which makes
-	 * the auto polling state machine consider the link to be negotiated
-	 * at 1 Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY, this
-	 * leaves the switch port completely dead (neither RX nor TX works).
-	 * With various other PHY / port combinations (PHY11G GPHY, PHY22F
-	 * GPHY, external RGMII PEF7071/7072) traffic would also stop:
-	 * sometimes it would work fine for a few minutes to hours and then
-	 * stop, on other devices no traffic could be sent or received at all.
-	 * Testing shows that these problems go away when PHY auto polling is
-	 * disabled.
-	 */
- gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
-
- /* Configure the MDIO Clock 2.5 MHz */
- gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
-
-	/* Disable the xMII interface and clear its isolation bit */
- for (i = 0; i < priv->hw_info->max_ports; i++)
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
- 0, i);
-
- /* enable special tag insertion on cpu port */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
- GSWIP_FDMA_PCTRLp(cpu_port));
-
- /* accept special tag in ingress direction */
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(cpu_port));
-
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
- gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
- GSWIP_BM_QUEUE_GCTRL);
-
- /* VLAN aware Switching */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
-
- /* Flush MAC Table */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
-
- err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
- GSWIP_PCE_GCTRL_0_MTFL);
- if (err) {
- dev_err(priv->dev, "MAC flushing didn't finish\n");
- return err;
- }
-
- gswip_port_enable(ds, cpu_port, NULL);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
-}
-
-static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_GSWIP;
-}
-
-static int gswip_vlan_active_create(struct gswip_priv *priv,
- struct net_device *bridge,
- int fid, u16 vid)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- int idx = -1;
- int err;
- int i;
-
- /* Look for a free slot */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (!priv->vlans[i].bridge) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1)
- return -ENOSPC;
-
- if (fid == -1)
- fid = idx;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.key[0] = vid;
- vlan_active.val[0] = fid;
- vlan_active.valid = true;
-
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
- return err;
- }
-
- priv->vlans[idx].bridge = bridge;
- priv->vlans[idx].vid = vid;
- priv->vlans[idx].fid = fid;
-
- return idx;
-}
-
-static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
-{
- struct gswip_pce_table_entry vlan_active = {0,};
- int err;
-
- vlan_active.index = idx;
- vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
- vlan_active.valid = false;
- err = gswip_pce_table_entry_write(priv, &vlan_active);
- if (err)
- dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
- priv->vlans[idx].bridge = NULL;
-
- return err;
-}
-
-static int gswip_vlan_add_unaware(struct gswip_priv *priv,
- struct net_device *bridge, int port)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- idx = i;
- break;
- }
- }
-
-	/* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, -1, 0);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = 0;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
-		/* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
- return 0;
-}
-
-static int gswip_vlan_add_aware(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool untagged,
- bool pvid)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- bool active_vlan_created = false;
- int idx = -1;
- int fid = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- if (fid != -1 && fid != priv->vlans[i].fid)
- dev_err(priv->dev, "one bridge with multiple flow ids\n");
- fid = priv->vlans[i].fid;
- if (priv->vlans[i].vid == vid) {
- idx = i;
- break;
- }
- }
- }
-
-	/* If this bridge is not programmed yet, add an Active VLAN table
- * entry in a free slot and prepare the VLAN mapping table entry.
- */
- if (idx == -1) {
- idx = gswip_vlan_active_create(priv, bridge, fid, vid);
- if (idx < 0)
- return idx;
- active_vlan_created = true;
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- /* VLAN ID byte, maps to the VLAN ID of vlan active table */
- vlan_mapping.val[0] = vid;
- } else {
- /* Read the existing VLAN mapping entry from the switch */
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
- err);
- return err;
- }
- }
-
- vlan_mapping.val[0] = vid;
- /* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[2] |= BIT(cpu_port);
- vlan_mapping.val[1] |= BIT(port);
- if (untagged)
- vlan_mapping.val[2] &= ~BIT(port);
- else
- vlan_mapping.val[2] |= BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
-		/* In case an Active VLAN was created, delete it again */
- if (active_vlan_created)
- gswip_vlan_active_remove(priv, idx);
- return err;
- }
-
- if (pvid)
- gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
-static int gswip_vlan_remove(struct gswip_priv *priv,
- struct net_device *bridge, int port,
- u16 vid, bool pvid, bool vlan_aware)
-{
- struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int idx = -1;
- int i;
- int err;
-
- /* Check if there is already a page for this bridge */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- (!vlan_aware || priv->vlans[i].vid == vid)) {
- idx = i;
- break;
- }
- }
-
- if (idx == -1) {
- dev_err(priv->dev, "bridge to leave does not exists\n");
- return -ENOENT;
- }
-
- vlan_mapping.index = idx;
- vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
- err = gswip_pce_table_entry_read(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
- return err;
- }
-
- vlan_mapping.val[1] &= ~BIT(port);
- vlan_mapping.val[2] &= ~BIT(port);
- err = gswip_pce_table_entry_write(priv, &vlan_mapping);
- if (err) {
- dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
- return err;
- }
-
- /* In case all ports are removed from the bridge, remove the VLAN */
- if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
- err = gswip_vlan_active_remove(priv, idx);
- if (err) {
- dev_err(priv->dev, "failed to write active VLAN: %d\n",
- err);
- return err;
- }
- }
-
-	/* GSWIP 2.2 (GRX300) and later program the VID directly here. */
- if (pvid)
- gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
-
- return 0;
-}
-
-static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
-{
- struct gswip_priv *priv = ds->priv;
- int err;
-
-	/* When the bridge uses VLAN filtering, we have to configure
-	 * VLAN-specific bridges. No bridge is configured here.
-	 */
- if (!br_vlan_enabled(bridge)) {
- err = gswip_vlan_add_unaware(priv, bridge, port);
- if (err)
- return err;
- priv->port_vlan_filter &= ~BIT(port);
- } else {
- priv->port_vlan_filter |= BIT(port);
- }
- return gswip_add_single_port_br(priv, port, false);
-}
-
-static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
-{
- struct gswip_priv *priv = ds->priv;
-
- gswip_add_single_port_br(priv, port, true);
-
-	/* When the bridge uses VLAN filtering, we have to configure
-	 * VLAN-specific bridges. No bridge is configured here.
-	 */
- if (!br_vlan_enabled(bridge))
- gswip_vlan_remove(priv, bridge, port, 0, true, false);
-}
-
-static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
- unsigned int max_ports = priv->hw_info->max_ports;
- int pos = max_ports;
- int i, idx = -1;
-
- /* We only support VLAN filtering on bridges */
- if (!dsa_is_cpu_port(ds, port) && !bridge)
- return -EOPNOTSUPP;
-
- /* Check if there is already a page for this VLAN */
- for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge &&
- priv->vlans[i].vid == vlan->vid) {
- idx = i;
- break;
- }
- }
-
- /* If this VLAN is not programmed yet, we have to reserve
-	 * one entry in the VLAN table. Make sure we start the search
-	 * at the next free position.
- */
- if (idx == -1) {
- /* Look for a free slot */
- for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
- if (!priv->vlans[pos].bridge) {
- idx = pos;
- pos++;
- break;
- }
- }
-
- if (idx == -1) {
- NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
- return -ENOSPC;
- }
- }
-
- return 0;
-}
-
-static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- int err;
-
- err = gswip_port_vlan_prepare(ds, port, vlan, extack);
- if (err)
- return err;
-
- /* We have to receive all packets on the CPU port and should not
-	 * do any VLAN filtering here. This is also called with a NULL
-	 * bridge, in which case we do not know which bridge to
-	 * configure this for.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
- untagged, pvid);
-}
-
-static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
-
- /* We have to receive all packets on the CPU port and should not
-	 * do any VLAN filtering here. This is also called with a NULL
-	 * bridge, in which case we do not know which bridge to
-	 * configure this for.
- */
- if (dsa_is_cpu_port(ds, port))
- return 0;
-
- return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
-}
-
-static void gswip_port_fast_age(struct dsa_switch *ds, int port)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to read mac bridge: %d\n",
- err);
- return;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
- continue;
-
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
- continue;
-
- mac_bridge.valid = false;
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
- return;
- }
- }
-}
-
-static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct gswip_priv *priv = ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
- GSWIP_SDMA_PCTRLp(port));
- return;
- case BR_STATE_BLOCKING:
- case BR_STATE_LISTENING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
- break;
- case BR_STATE_LEARNING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
- break;
- default:
- dev_err(priv->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
- GSWIP_SDMA_PCTRLp(port));
- gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
- GSWIP_PCE_PCTRL_0p(port));
-}
-
-static int gswip_port_fdb(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid, bool add)
-{
- struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int fid = -1;
- int i;
- int err;
-
- if (!bridge)
- return -EINVAL;
-
- for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
- if (priv->vlans[i].bridge == bridge) {
- fid = priv->vlans[i].fid;
- break;
- }
- }
-
- if (fid == -1) {
- dev_err(priv->dev, "Port not part of a bridge\n");
- return -EINVAL;
- }
-
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.key_mode = true;
- mac_bridge.key[0] = addr[5] | (addr[4] << 8);
- mac_bridge.key[1] = addr[3] | (addr[2] << 8);
- mac_bridge.key[2] = addr[1] | (addr[0] << 8);
- mac_bridge.key[3] = fid;
- mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
- mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
- mac_bridge.valid = add;
-
- err = gswip_pce_table_entry_write(priv, &mac_bridge);
- if (err)
- dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
-
- return err;
-}
-
-static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- return gswip_port_fdb(ds, port, addr, vid, true);
-}
-
-static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- return gswip_port_fdb(ds, port, addr, vid, false);
-}
-
-static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct gswip_priv *priv = ds->priv;
- struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned char addr[6];
- int i;
- int err;
-
- for (i = 0; i < 2048; i++) {
- mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
- mac_bridge.index = i;
-
- err = gswip_pce_table_entry_read(priv, &mac_bridge);
- if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
- return err;
- }
-
- if (!mac_bridge.valid)
- continue;
-
- addr[5] = mac_bridge.key[0] & 0xff;
- addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
- addr[3] = mac_bridge.key[1] & 0xff;
- addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
- addr[1] = mac_bridge.key[2] & 0xff;
- addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
- if (mac_bridge.val[0] & BIT(port)) {
- err = cb(addr, 0, true, data);
- if (err)
- return err;
- }
- } else {
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
- err = cb(addr, 0, false, data);
- if (err)
- return err;
- }
- }
- }
- return 0;
-}
-
-static void gswip_phylink_set_capab(unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- /* With the exclusion of MII, Reverse MII and Reduced MII, we
- * support Gigabit, including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- switch (port) {
- case 0:
- case 1:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
- break;
- case 2:
- case 3:
- case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
- }
-
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
-}
-
-static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- switch (port) {
- case 0:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
- break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
- }
-
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
-}
-
-static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
-{
- u32 mdio_phy;
-
- if (link)
- mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
- else
- mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
- phy_interface_t interface)
-{
- u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
-
- switch (speed) {
- case SPEED_10:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_100:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
-
- if (interface == PHY_INTERFACE_MODE_RMII)
- mii_cfg = GSWIP_MII_CFG_RATE_M50;
- else
- mii_cfg = GSWIP_MII_CFG_RATE_M25;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
- break;
-
- case SPEED_1000:
- mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
-
- mii_cfg = GSWIP_MII_CFG_RATE_M125;
-
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
- break;
- }
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
-}
-
-static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (duplex == DUPLEX_FULL) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
- mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
- GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_port_set_pause(struct gswip_priv *priv, int port,
- bool tx_pause, bool rx_pause)
-{
- u32 mac_ctrl_0, mdio_phy;
-
- if (tx_pause && rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else if (tx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- } else if (rx_pause) {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_EN;
- } else {
- mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
- mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
- GSWIP_MDIO_PHY_FCONRX_DIS;
- }
-
- gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
- mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
- gswip_mdio_mask(priv,
- GSWIP_MDIO_PHY_FCONTX_MASK |
- GSWIP_MDIO_PHY_FCONRX_MASK,
- mdio_phy, GSWIP_MDIO_PHYp(port));
-}
-
-static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct gswip_priv *priv = ds->priv;
- u32 miicfg = 0;
-
- miicfg |= GSWIP_MII_CFG_LDCLKDIS;
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_INTERNAL:
- miicfg |= GSWIP_MII_CFG_MODE_MIIM;
- break;
- case PHY_INTERFACE_MODE_REVMII:
- miicfg |= GSWIP_MII_CFG_MODE_MIIP;
- break;
- case PHY_INTERFACE_MODE_RMII:
- miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- miicfg |= GSWIP_MII_CFG_MODE_RGMII;
- break;
- case PHY_INTERFACE_MODE_GMII:
- miicfg |= GSWIP_MII_CFG_MODE_GMII;
- break;
- default:
- dev_err(ds->dev,
- "Unsupported interface: %d\n", state->interface);
- return;
- }
-
- gswip_mii_mask_cfg(priv,
- GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
- GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
- miicfg, port);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
- GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
- break;
- default:
- break;
- }
-}
-
-static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
-{
- struct gswip_priv *priv = ds->priv;
-
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
-
- if (!dsa_is_cpu_port(ds, port))
- gswip_port_set_link(priv, port, false);
-}
-
-static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
-{
- struct gswip_priv *priv = ds->priv;
-
- if (!dsa_is_cpu_port(ds, port)) {
- gswip_port_set_link(priv, port, true);
- gswip_port_set_speed(priv, port, speed, interface);
- gswip_port_set_duplex(priv, port, duplex);
- gswip_port_set_pause(priv, port, tx_pause, rx_pause);
- }
-
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
-}
-
-static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
- uint8_t *data)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
- strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
- ETH_GSTRING_LEN);
-}
-
-static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
- u32 index)
-{
- u32 result;
- int err;
-
- gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
- gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
- GSWIP_BM_RAM_CTRL_OPMOD,
- table | GSWIP_BM_RAM_CTRL_BAS,
- GSWIP_BM_RAM_CTRL);
-
- err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
- GSWIP_BM_RAM_CTRL_BAS);
- if (err) {
- dev_err(priv->dev, "timeout while reading table: %u, index: %u",
- table, index);
- return 0;
- }
-
- result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
- result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
-
- return result;
-}
-
-static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct gswip_priv *priv = ds->priv;
- const struct gswip_rmon_cnt_desc *rmon_cnt;
- int i;
- u64 high;
-
- for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
- rmon_cnt = &gswip_rmon_cnt[i];
-
- data[i] = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset);
- if (rmon_cnt->size == 2) {
- high = gswip_bcm_ram_entry_read(priv, port,
- rmon_cnt->offset + 1);
- data[i] |= high << 32;
- }
- }
-}
-
-static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return 0;
-
- return ARRAY_SIZE(gswip_rmon_cnt);
-}
-
-static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx200_phylink_validate,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx300_phylink_validate,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
-};
-
-static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
-};
-
-static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
- .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
- .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
-};
-
-static const struct xway_gphy_match_data xrx300_gphy_data = {
- .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
- .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
-};
-
-static const struct of_device_id xway_gphy_match[] = {
- { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
- { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
- { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
- { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
- { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
- {},
-};
-
-static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
-{
- struct device *dev = priv->dev;
- const struct firmware *fw;
- void *fw_addr;
- dma_addr_t dma_addr;
- dma_addr_t dev_addr;
- size_t size;
- int ret;
-
- ret = clk_prepare_enable(gphy_fw->clk_gate);
- if (ret)
- return ret;
-
- reset_control_assert(gphy_fw->reset);
-
- /* The vendor BSP uses a 200ms delay after asserting the reset line.
- * Without this some users are observing that the PHY is not coming up
- * on the MDIO bus.
- */
- msleep(200);
-
- ret = request_firmware(&fw, gphy_fw->fw_name, dev);
- if (ret) {
- dev_err(dev, "failed to load firmware: %s, error: %i\n",
- gphy_fw->fw_name, ret);
- return ret;
- }
-
- /* GPHY cores need the firmware code in a persistent and contiguous
- * memory area with a 16 kB boundary aligned start address.
- */
- size = fw->size + XRX200_GPHY_FW_ALIGN;
-
- fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
- if (fw_addr) {
- fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
- dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
- memcpy(fw_addr, fw->data, fw->size);
- } else {
- dev_err(dev, "failed to alloc firmware memory\n");
- release_firmware(fw);
- return -ENOMEM;
- }
-
- release_firmware(fw);
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
- if (ret)
- return ret;
-
- reset_control_deassert(gphy_fw->reset);
-
- return ret;
-}
-
-static int gswip_gphy_fw_probe(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw,
- struct device_node *gphy_fw_np, int i)
-{
- struct device *dev = priv->dev;
- u32 gphy_mode;
- int ret;
- char gphyname[10];
-
- snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
-
- gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
- if (IS_ERR(gphy_fw->clk_gate)) {
- dev_err(dev, "Failed to lookup gate clock\n");
- return PTR_ERR(gphy_fw->clk_gate);
- }
-
- ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
- if (ret)
- return ret;
-
- ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
- /* Default to GE mode */
- if (ret)
- gphy_mode = GPHY_MODE_GE;
-
- switch (gphy_mode) {
- case GPHY_MODE_FE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
- break;
- case GPHY_MODE_GE:
- gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
- break;
- default:
- dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
- return -EINVAL;
- }
-
- gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
-
- return gswip_gphy_fw_load(priv, gphy_fw);
-}
-
-static void gswip_gphy_fw_remove(struct gswip_priv *priv,
- struct gswip_gphy_fw *gphy_fw)
-{
- int ret;
-
- /* check if the device was fully probed */
- if (!gphy_fw->fw_name)
- return;
-
- ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
- if (ret)
- dev_err(priv->dev, "can not reset GPHY FW pointer");
-
- clk_disable_unprepare(gphy_fw->clk_gate);
-
- reset_control_put(gphy_fw->reset);
-}
-
-static int gswip_gphy_fw_list(struct gswip_priv *priv,
- struct device_node *gphy_fw_list_np, u32 version)
-{
- struct device *dev = priv->dev;
- struct device_node *gphy_fw_np;
- const struct of_device_id *match;
- int err;
- int i = 0;
-
- /* The VRX200 rev 1.1 uses GSWIP 2.0 and needs the older
- * GPHY firmware. The VRX200 rev 1.2 uses GSWIP 2.1 and
- * needs a different GPHY firmware.
- */
- if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
- switch (version) {
- case GSWIP_VERSION_2_0:
- priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
- break;
- case GSWIP_VERSION_2_1:
- priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
- break;
- default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
- }
- }
-
- match = of_match_node(xway_gphy_match, gphy_fw_list_np);
- if (match && match->data)
- priv->gphy_fw_name_cfg = match->data;
-
- if (!priv->gphy_fw_name_cfg) {
- dev_err(dev, "GPHY compatible type not supported");
- return -ENOENT;
- }
-
- priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
- if (!priv->num_gphy_fw)
- return -ENOENT;
-
- priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
- "lantiq,rcu");
- if (IS_ERR(priv->rcu_regmap))
- return PTR_ERR(priv->rcu_regmap);
-
- priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
- sizeof(*priv->gphy_fw),
- GFP_KERNEL | __GFP_ZERO);
- if (!priv->gphy_fw)
- return -ENOMEM;
-
- for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
- err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
- gphy_fw_np, i);
- if (err)
- goto remove_gphy;
- i++;
- }
-
- /* The standalone PHY11G requires 300ms to be fully
- * initialized and ready for any MDIO communication after being
- * taken out of reset. For the SoC-internal GPHY variant there
- * is no (known) documentation for the minimum time after a
- * reset. Use the same value as for the standalone variant as
- * some users have reported internal PHYs not being detected
- * without any delay.
- */
- msleep(300);
-
- return 0;
-
-remove_gphy:
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static int gswip_probe(struct platform_device *pdev)
-{
- struct gswip_priv *priv;
- struct device_node *np, *mdio_np, *gphy_fw_np;
- struct device *dev = &pdev->dev;
- int err;
- int i;
- u32 version;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->gswip = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->gswip))
- return PTR_ERR(priv->gswip);
-
- priv->mdio = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(priv->mdio))
- return PTR_ERR(priv->mdio);
-
- priv->mii = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(priv->mii))
- return PTR_ERR(priv->mii);
-
- priv->hw_info = of_device_get_match_data(dev);
- if (!priv->hw_info)
- return -EINVAL;
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->hw_info->max_ports;
- priv->ds->priv = priv;
- priv->ds->ops = priv->hw_info->ops;
- priv->dev = dev;
- mutex_init(&priv->pce_table_lock);
- version = gswip_switch_r(priv, GSWIP_VERSION);
-
- np = dev->of_node;
- switch (version) {
- case GSWIP_VERSION_2_0:
- case GSWIP_VERSION_2_1:
- if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
- return -EINVAL;
- break;
- case GSWIP_VERSION_2_2:
- case GSWIP_VERSION_2_2_ETC:
- if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
- !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
- return -EINVAL;
- break;
- default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
- }
-
- /* load the GPHY firmware */
- gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
- if (gphy_fw_np) {
- err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
- of_node_put(gphy_fw_np);
- if (err) {
- dev_err(dev, "gphy fw probe failed\n");
- return err;
- }
- }
-
- /* bring up the mdio bus */
- mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
- if (mdio_np) {
- err = gswip_mdio(priv, mdio_np);
- if (err) {
- dev_err(dev, "mdio probe failed\n");
- goto put_mdio_node;
- }
- }
-
- err = dsa_register_switch(priv->ds);
- if (err) {
- dev_err(dev, "dsa switch register failed: %i\n", err);
- goto mdio_bus;
- }
- if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
- priv->hw_info->cpu_port);
- err = -EINVAL;
- goto disable_switch;
- }
-
- platform_set_drvdata(pdev, priv);
-
- dev_info(dev, "probed GSWIP version %lx mod %lx\n",
- (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
- (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
- return 0;
-
-disable_switch:
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
- dsa_unregister_switch(priv->ds);
-mdio_bus:
- if (mdio_np)
- mdiobus_unregister(priv->ds->slave_mii_bus);
-put_mdio_node:
- of_node_put(mdio_np);
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- return err;
-}
-
-static int gswip_remove(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
- int i;
-
- if (!priv)
- return 0;
-
- /* disable the switch */
- gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
-
- dsa_unregister_switch(priv->ds);
-
- if (priv->ds->slave_mii_bus) {
- mdiobus_unregister(priv->ds->slave_mii_bus);
- of_node_put(priv->ds->slave_mii_bus->dev.of_node);
- }
-
- for (i = 0; i < priv->num_gphy_fw; i++)
- gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void gswip_shutdown(struct platform_device *pdev)
-{
- struct gswip_priv *priv = platform_get_drvdata(pdev);
-
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct gswip_hw_info gswip_xrx200 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx200_switch_ops,
-};
-
-static const struct gswip_hw_info gswip_xrx300 = {
- .max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx300_switch_ops,
-};
-
-static const struct of_device_id gswip_of_match[] = {
- { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
- { .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
- { .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
- {},
-};
-MODULE_DEVICE_TABLE(of, gswip_of_match);
-
-static struct platform_driver gswip_driver = {
- .probe = gswip_probe,
- .remove = gswip_remove,
- .shutdown = gswip_shutdown,
- .driver = {
- .name = "gswip",
- .of_match_table = gswip_of_match,
- },
-};
-
-module_platform_driver(gswip_driver);
-
-MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
-MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
-MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
-MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c9e2a8989556..c71d3fd5dfeb 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,49 +1,46 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NET_DSA_MICROCHIP_KSZ_COMMON
- select NET_DSA_TAG_KSZ
- tristate
-
-menuconfig NET_DSA_MICROCHIP_KSZ9477
- tristate "Microchip KSZ9477 series switch support"
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8XXX/KSZ9XXX/LAN937X series switch support"
depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
+ select NET_DSA_TAG_NONE
+ select NET_IEEE8021Q_HELPERS
+ select DCB
+ select PCS_XPCS
help
- This driver adds support for Microchip KSZ9477 switch chips.
+ This driver adds support for the Microchip KSZ8, KSZ9 and
+ LAN937X series switch chips, namely KSZ8863/8873,
+ KSZ8895/8864, KSZ8794/8795/8765,
+ KSZ9477/9897/9896/9567/8567, KSZ9893/9563/8563 and
+ LAN9370/9371/9372/9373/9374.

config NET_DSA_MICROCHIP_KSZ9477_I2C
- tristate "KSZ9477 series I2C connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.

-config NET_DSA_MICROCHIP_KSZ9477_SPI
- tristate "KSZ9477 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.

-menuconfig NET_DSA_MICROCHIP_KSZ8795
- tristate "Microchip KSZ8795 series switch support"
- depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
- help
- This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
-
-config NET_DSA_MICROCHIP_KSZ8795_SPI
- tristate "KSZ8795 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
- select REGMAP_SPI
+config NET_DSA_MICROCHIP_KSZ_PTP
+ bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON=m || PTP_1588_CLOCK=y
help
- This driver accesses KSZ8795 chip through SPI.
-
- It is required to use the KSZ8795 switch driver as the only access
- is through SPI.
+ Select to enable support for timestamping & PTP clock manipulation in
+ the KSZ8563/KSZ9563/LAN937x series of switches. KSZ8563/KSZ9563
+ support one-step timestamping only, while the LAN937x switches
+ support both one-step and two-step timestamping.

config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
select MDIO_BITBANG
help
Select to enable support for registering switches configured through
Microchip SMI. It supports the KSZ8863 and KSZ8873 switch.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 2a03b21a3386..9347cfb3d0b5 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o ksz_dcb.o
+ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
+ksz_switch-objs += ksz8.o
+ksz_switch-objs += lan937x_main.o
+
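+# NET_DSA_MICROCHIP_KSZ_PTP is a bool symbol, so ksz_ptp.o is linked
+# into the ksz_switch module conditionally rather than as its own module.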
+ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
+ksz_switch-objs += ksz_ptp.o
+endif
+
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
new file mode 100644
index 000000000000..c354abdafc1b
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -0,0 +1,2115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8XXX series switch driver
+ *
+ * It supports the following switches:
+ * - KSZ8463
+ * - KSZ8863, KSZ8873 aka KSZ88X3
+ * - KSZ8895, KSZ8864 aka KSZ8895 family
+ * - KSZ8794, KSZ8795, KSZ8765 aka KSZ87XX
+ * Note that it does NOT support:
+ * - KSZ8563, KSZ8567 - see KSZ9477 driver
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/micrel_phy.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/phylink.h>
+
+#include "ksz_common.h"
+#include "ksz8_reg.h"
+#include "ksz8.h"
+
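+/* Set or clear @bits via read-modify-write, either in a global register
+ * (ksz_cfg) or in a per-port register translated through the chip's
+ * get_port_addr() hook (ksz_port_cfg).
+ */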
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
+}
+
+/**
+ * ksz8_ind_write8 - EEE/ACL/PME indirect register write
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @data: The data to be written.
+ *
+ * This function performs an indirect register write for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+/**
+ * ksz8_ind_read8 - EEE/ACL/PME indirect register read
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @val: The value read.
+ *
+ * This function performs an indirect register read for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_read8(dev, regs[REG_IND_BYTE], val);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
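+/* PME accessors built on the indirect helpers above: the table select is
+ * taken from the upper byte of @reg / @offset, with the port number
+ * folded in for the per-port variants.
+ */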
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value);
+}
+
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_read8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_write8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_reset_switch(struct ksz_device *dev)
+{
+ if (ksz_is_ksz88x3(dev)) {
+ /* reset switch */
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
+ ksz_cfg(dev, KSZ8863_REG_SW_RESET,
+ KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
+ } else if (ksz_is_ksz8463(dev)) {
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, true);
+ ksz_cfg(dev, KSZ8463_REG_SW_RESET,
+ KSZ8463_GLOBAL_SOFTWARE_RESET, false);
+ } else {
+ /* reset switch */
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1,
+ SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+ }
+
+ return 0;
+}
+
+static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl2 = 0;
+
+ if (frame_size <= KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE;
+ else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE |
+ KSZ8863_HUGE_PACKET_ENABLE, ctrl2);
+}
+
+static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl1 = 0, ctrl2 = 0;
+ int ret;
+
+ if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= SW_LEGAL_PACKET_DISABLE;
+ if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl1 |= SW_HUGE_PACKET;
+
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
+ if (ret)
+ return ret;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2);
+}
+
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size;
+
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
+
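+ /* The hardware limits apply to the wire frame length, so account
+ * for the Ethernet header with one VLAN tag plus the FCS on top
+ * of the MTU.
+ */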
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return ksz8795_change_mtu(dev, frame_size);
+ case KSZ8463_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
+ return ksz8863_change_mtu(dev, frame_size);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
+{
+ u8 mask_4q, mask_2q;
+ u8 reg_4q, reg_2q;
+ u8 data_4q = 0;
+ u8 data_2q = 0;
+ int ret;
+
+ if (ksz_is_ksz88x3(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_0;
+ reg_2q = REG_PORT_CTRL_2;
+
+ /* KSZ8795 family switches have Weighted Fair Queueing (WFQ)
+ * enabled by default. Enable it for KSZ8873 family switches
+ * too. The KSZ8873 family defaults to strict priority, which
+ * should only be selected explicitly via TC_SETUP_QDISC_ETS
+ * rather than being the default.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE,
+ WEIGHTED_FAIR_QUEUE_ENABLE);
+ if (ret)
+ return ret;
+ } else if (ksz_is_ksz8463(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = P1CR1;
+ reg_2q = P1CR1 + 1;
+ } else {
+ mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_13;
+ reg_2q = REG_PORT_CTRL_0;
+
+ /* TODO: this is legacy from initial KSZ8795 driver, should be
+ * moved to appropriate place in the future.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_19,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED);
+ if (ret)
+ return ret;
+ }
+
+ if (queues == 4)
+ data_4q = mask_4q;
+ else if (queues == 2)
+ data_2q = mask_2q;
+
+ ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q);
+ if (ret)
+ return ret;
+
+ return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q);
+}
+
+int ksz8_all_queues_split(struct ksz_device *dev, int queues)
+{
+ struct dsa_switch *ds = dev->ds;
+ const struct dsa_port *dp;
+
+ dsa_switch_for_each_port(dp, ds) {
+ int ret = ksz8_port_queue_split(dev, dp->index, queues);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+{
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ ctrl_addr = addr + dev->info->reg_mib_cnt * port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ /* It is almost guaranteed to always read the valid bit because of
+ * slow SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
+
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ if (check & masks[MIB_COUNTER_OVERFLOW])
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
+ ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
+ ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ /* It is almost guaranteed to always read the valid bit because of
+ * slow SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
+
+ if (check & masks[MIB_COUNTER_VALID]) {
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ if (addr < 2) {
+ u64 total;
+
+ total = check & MIB_TOTAL_BYTES_H;
+ total <<= 32;
+ *cnt += total;
+ *cnt += data;
+ if (check & masks[MIB_COUNTER_OVERFLOW]) {
+ total = MIB_TOTAL_BYTES_H + 1;
+ total <<= 32;
+ *cnt += total;
+ }
+ } else {
+ if (check & masks[MIB_COUNTER_OVERFLOW])
+ *cnt += MIB_PACKET_DROPPED + 1;
+ *cnt += data & MIB_PACKET_DROPPED;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ u32 *last = (u32 *)dropped;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u32 data;
+ u32 cur;
+
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
+ ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
+ KSZ8863_MIB_PACKET_DROPPED_RX_0;
+ if (ksz_is_8895_family(dev) &&
+ ctrl_addr == KSZ8863_MIB_PACKET_DROPPED_RX_0)
+ ctrl_addr = KSZ8895_MIB_PACKET_DROPPED_RX_0;
+ ctrl_addr += port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
+ mutex_unlock(&dev->alu_mutex);
+
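+ /* The dropped-packet counter is free-running, so compute the
+ * delta against the cached previous value and handle a
+ * wrap-around of the counter field.
+ */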
+ data &= MIB_PACKET_DROPPED;
+ cur = last[addr];
+ if (data != cur) {
+ last[addr] = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+}
+
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ if (is_ksz88xx(dev))
+ ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
+ else
+ ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
+}
+
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ if (is_ksz88xx(dev))
+ return;
+
+ /* enable the port for flush/freeze function */
+ if (freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
+
+ /* disable the port after freeze is done */
+ if (!freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+}
+
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+ u64 *dropped;
+
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
+ /* flush all enabled port MIB counters */
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+ }
+
+ mib->cnt_ptr = 0;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* last one in storage */
+ dropped = &mib->counters[dev->info->mib_cnt];
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ dropped, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+}
+
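+/* Read one 64-bit entry from an indirect switch table, e.g. the static
+ * MAC or VLAN table.
+ */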
+static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret;
+
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+unlock_alu:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret;
+
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+unlock_alu:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
+{
+ int timeout = 100;
+ const u32 *masks;
+ const u16 *regs;
+ int ret;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
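+ /* Poll until the entry is no longer flagged not-ready, giving up
+ * after a bounded number of retries.
+ */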
+ do {
+ ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ if (ret)
+ return ret;
+
+ timeout--;
+ } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
+
+ /* Entry is not ready for accessing. */
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY])
+ return -ETIMEDOUT;
+
+ /* Entry is ready for accessing. */
+ return ksz_read8(dev, regs[REG_IND_DATA_8], data);
+}
+
+static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u16 *entries)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ const u16 *regs;
+ u16 ctrl_addr;
+ u64 buf = 0;
+ u8 data;
+ int cnt;
+ int ret;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz8_valid_dyn_entry(dev, &data);
+ if (ret)
+ goto unlock_alu;
+
+ if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) {
+ *entries = 0;
+ goto unlock_alu;
+ }
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
+ if (ret)
+ goto unlock_alu;
+
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+ /* Check how many valid entries are in the table. */
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
+ *entries = cnt + 1;
+
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+
+unlock_alu:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
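+/* Read and decode one static MAC table entry; *valid is cleared when the
+ * entry is neither valid nor marked override.
+ */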
+static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu, bool *valid)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ u64 data;
+ int ret;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ if (ret)
+ return ret;
+
+ data_hi = data >> 32;
+ data_lo = (u32)data;
+
+ if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+ masks[STATIC_MAC_TABLE_OVERRIDE]))) {
+ *valid = false;
+ return 0;
+ }
+
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward =
+ (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+ shifts[STATIC_MAC_FWD_PORTS];
+ alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
+
+ /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and
+ * STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
+ * static MAC table compared to doing write.
+ */
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
+ data_hi >>= 1;
+ alu->is_static = true;
+ alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+ alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+ shifts[STATIC_MAC_FID];
+
+ *valid = true;
+
+ return 0;
+}
+
+static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ const u8 *shifts;
+ const u32 *masks;
+ u64 data;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ data_lo = ((u32)alu->mac[2] << 24) |
+ ((u32)alu->mac[3] << 16) |
+ ((u32)alu->mac[4] << 8) | alu->mac[5];
+ data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
+ data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];
+
+ if (alu->is_override)
+ data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
+ if (alu->is_use_fid) {
+ data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
+ data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
+ }
+ if (alu->is_static)
+ data_hi |= masks[STATIC_MAC_TABLE_VALID];
+ else
+ data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
+
+ data = (u64)data_hi << 32 | data_lo;
+
+ return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
+}
+
+static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
+ u8 *member, u8 *valid)
+{
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ *fid = vlan & masks[VLAN_TABLE_FID];
+ *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
+ shifts[VLAN_TABLE_MEMBERSHIP_S];
+ *valid = !!(vlan & masks[VLAN_TABLE_VALID]);
+}
+
+static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
+ u16 *vlan)
+{
+ const u8 *shifts;
+ const u32 *masks;
+
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ *vlan = fid;
+ *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
+ if (valid)
+ *vlan |= masks[VLAN_TABLE_VALID];
+}
+
+static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
+{
+ const u8 *shifts;
+ u64 data;
+ int i;
+
+ shifts = dev->info->shifts;
+
+ ksz8_r_table(dev, TABLE_VLAN, addr, &data);
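+ /* One 64-bit VLAN table row holds four consecutive VID entries. */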
+ addr *= 4;
+ for (i = 0; i < 4; i++) {
+ dev->vlan_cache[addr + i].table[0] = (u16)data;
+ data >>= shifts[VLAN_TABLE];
+ }
+}
+
+static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
+ *vlan = data[index];
+}
+
+static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
+ data[index] = vlan;
+ dev->vlan_cache[vid].table[0] = vlan;
+ ksz8_w_table(dev, TABLE_VLAN, addr, buf);
+}
+
+/**
+ * ksz879x_get_loopback - KSZ879x specific function to get loopback
+ * configuration status for a specific port
+ * @dev: Pointer to the device structure
+ * @port: Port number to query
+ * @val: Pointer to store the result
+ *
+ * This function reads the SMI registers to determine whether loopback mode
+ * is enabled for a specific port.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_get_loopback(struct ksz_device *dev, u16 port,
+ u16 *val)
+{
+ u8 stat3;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3);
+ if (ret)
+ return ret;
+
+ if (stat3 & PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for
+ * a specific port
+ * @dev: Pointer to the device structure.
+ * @port: Port number to modify.
+ * @val: Value indicating whether to enable or disable loopback mode.
+ *
+ * This function translates loopback bit of the BMCR register into the
+ * corresponding hardware register bit value and writes it to the SMI interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 stat3 = 0;
+
+ if (val & BMCR_LOOPBACK)
+ stat3 |= PORT_PHY_LOOPBACK;
+
+ return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK,
+ stat3);
+}
+
+/**
+ * ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY
+ * Control register (Reg. 31).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Control
+ * register.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 reg_val;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_LINK_STATUS], &reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (reg_val & PORT_MDIX_STATUS)
+ *val |= KSZ886X_CTRL_MDIX_STAT;
+
+ ret = ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (reg_val & PORT_FORCE_LINK)
+ *val |= KSZ886X_CTRL_FORCE_LINK;
+
+ if (reg_val & PORT_POWER_SAVING)
+ *val |= KSZ886X_CTRL_PWRSAVE;
+
+ if (reg_val & PORT_PHY_REMOTE_LOOPBACK)
+ *val |= KSZ886X_CTRL_REMOTE_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Basic
+ * mode control register.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 restart, speed, ctrl;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (ctrl & PORT_FORCE_100_MBIT)
+ *val |= BMCR_SPEED100;
+
+ if (ksz_is_ksz88x3(dev)) {
+ if (restart & KSZ8873_PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ *val |= BMCR_ANENABLE;
+ } else {
+ ret = ksz879x_get_loopback(dev, port, val);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ *val |= BMCR_ANENABLE;
+ }
+
+ if (restart & PORT_POWER_DOWN)
+ *val |= BMCR_PDOWN;
+
+ if (restart & PORT_AUTO_NEG_RESTART)
+ *val |= BMCR_ANRESTART;
+
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ *val |= BMCR_FULLDPLX;
+
+ if (speed & PORT_HP_MDIX)
+ *val |= KSZ886X_BMCR_HP_MDIX;
+
+ if (restart & PORT_FORCE_MDIX)
+ *val |= KSZ886X_BMCR_FORCE_MDI;
+
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+
+ if (restart & PORT_TX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+
+ if (restart & PORT_LED_OFF)
+ *val |= KSZ886X_BMCR_DISABLE_LED;
+
+ return 0;
+}
+
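+/**
+ * ksz8_r_phy - Emulate a MIIM PHY register read via the SMI interface
+ * @dev: The KSZ device instance.
+ * @phy: The PHY (port) number to be read.
+ * @reg: The MIIM register to be read.
+ * @val: The value read.
+ *
+ * Registers without an SMI equivalent are not translated; in that case
+ * *val is left unchanged and 0 is returned.
+ *
+ * Return: 0 on success, error code on failure.
+ */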
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 ctrl, link, val1, val2;
+ int processed = true;
+ const u16 *regs;
+ u16 data = 0;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_r_phy_bmcr(dev, p, &data);
+ if (ret)
+ return ret;
+ break;
+ case MII_BMSR:
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
+ data = BMSR_100FULL |
+ BMSR_100HALF |
+ BMSR_10FULL |
+ BMSR_10HALF |
+ BMSR_ANEGCAPABLE;
+ if (link & PORT_AUTO_NEG_COMPLETE)
+ data |= BMSR_ANEGCOMPLETE;
+ if (link & PORT_STAT_LINK_GOOD)
+ data |= BMSR_LSTATUS;
+ break;
+ case MII_PHYSID1:
+ data = KSZ8795_ID_HI;
+ break;
+ case MII_PHYSID2:
+ if (ksz_is_ksz88x3(dev))
+ data = KSZ8863_ID_LO;
+ else
+ data = KSZ8795_ID_LO;
+ break;
+ case MII_ADVERTISE:
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ data = ADVERTISE_CSMA;
+ if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
+ data |= ADVERTISE_PAUSE_CAP;
+ if (ctrl & PORT_AUTO_NEG_100BTX_FD)
+ data |= ADVERTISE_100FULL;
+ if (ctrl & PORT_AUTO_NEG_100BTX)
+ data |= ADVERTISE_100HALF;
+ if (ctrl & PORT_AUTO_NEG_10BT_FD)
+ data |= ADVERTISE_10FULL;
+ if (ctrl & PORT_AUTO_NEG_10BT)
+ data |= ADVERTISE_10HALF;
+ break;
+ case MII_LPA:
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
+ data = LPA_SLCT;
+ if (link & PORT_REMOTE_SYM_PAUSE)
+ data |= LPA_PAUSE_CAP;
+ if (link & PORT_REMOTE_100BTX_FD)
+ data |= LPA_100FULL;
+ if (link & PORT_REMOTE_100BTX)
+ data |= LPA_100HALF;
+ if (link & PORT_REMOTE_10BT_FD)
+ data |= LPA_10FULL;
+ if (link & PORT_REMOTE_10BT)
+ data |= LPA_10HALF;
+ if (data & ~LPA_SLCT)
+ data |= LPA_LPACK;
+ break;
+ case PHY_REG_LINK_MD:
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
+ if (val1 & PORT_START_CABLE_DIAG)
+ data |= PHY_START_CABLE_DIAG;
+
+ if (val1 & PORT_CABLE_10M_SHORT)
+ data |= PHY_CABLE_10M_SHORT;
+
+ data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
+ FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
+
+ data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
+ (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
+ FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
+ break;
+ case PHY_REG_PHY_CTRL:
+ ret = ksz8_r_phy_ctrl(dev, p, &data);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ processed = false;
+ break;
+ }
+ if (processed)
+ *val = data;
+
+ return 0;
+}
+
+/**
+ * ksz8_w_phy_ctrl - Translates and writes to the SMI interface from a MIIM PHY
+ * Control register (Reg. 31).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Control register
+ * into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val)
+{
+ u8 reg_val = 0;
+ int ret;
+
+ if (val & KSZ886X_CTRL_FORCE_LINK)
+ reg_val |= PORT_FORCE_LINK;
+
+ if (val & KSZ886X_CTRL_PWRSAVE)
+ reg_val |= PORT_POWER_SAVING;
+
+ if (val & KSZ886X_CTRL_REMOTE_LOOPBACK)
+ reg_val |= PORT_PHY_REMOTE_LOOPBACK;
+
+ ret = ksz_prmw8(dev, port, REG_PORT_LINK_MD_CTRL, PORT_FORCE_LINK |
+ PORT_POWER_SAVING | PORT_PHY_REMOTE_LOOPBACK, reg_val);
+ return ret;
+}
+
+/**
+ * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Basic mode control
+ * register into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit                    | KSZ8794 Reg/Bit             | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset         | 0xF/4                       | Not supported
+ * Bit 14 - Loopback           | 0xD/0 (MAC), 0xF/7 (PHY)    ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100          | 0xC/6                       = 0xC/6
+ * Bit 12 - AN Enable          | 0xC/7 (reverse logic)       ~ 0xC/7
+ * Bit 11 - Power Down         | 0xD/3                       = 0xD/3
+ * Bit 10 - PHY Isolate        | 0xF/5                       | Not supported
+ * Bit 9 - Restart AN          | 0xD/5                       = 0xD/5
+ * Bit 8 - Force Full-Duplex   | 0xC/5                       = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported               | Not supported
+ * Bit 6 - Reserved            | Not supported               | Not supported
+ * Bit 5 - Hp_mdix             | 0x9/7                       ~ 0xF/7
+ * Bit 4 - Force MDI           | 0xD/1                       = 0xD/1
+ * Bit 3 - Disable MDIX        | 0xD/2                       = 0xD/2
+ * Bit 2 - Disable Far-End F.  | ????                        | 0xD/4
+ * Bit 1 - Disable Transmit    | 0xD/6                       = 0xD/6
+ * Bit 0 - Disable LED         | 0xD/7                       = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 restart, speed, ctrl, restart_mask;
+ const u16 *regs = dev->info->regs;
+ int ret;
+
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ return 0;
+
+ speed = 0;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ speed |= PORT_HP_MDIX;
+
+ ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed);
+ if (ret)
+ return ret;
+
+ ctrl = 0;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[port].fiber)
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+ }
+
+ if (val & BMCR_SPEED100)
+ ctrl |= PORT_FORCE_100_MBIT;
+
+ if (val & BMCR_FULLDPLX)
+ ctrl |= PORT_FORCE_FULL_DUPLEX;
+
+ /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same bit;
+ * only the polarity differs between chip variants.
+ */
+ ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT |
+ PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl);
+ if (ret)
+ return ret;
+
+ restart = 0;
+ restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART |
+ PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX;
+
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ restart |= PORT_LED_OFF;
+
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ restart |= PORT_TX_DISABLE;
+
+ if (val & BMCR_ANRESTART)
+ restart |= PORT_AUTO_NEG_RESTART;
+
+ if (val & BMCR_PDOWN)
+ restart |= PORT_POWER_DOWN;
+
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ restart |= PORT_AUTO_MDIX_DISABLE;
+
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ restart |= PORT_FORCE_MDIX;
+
+ if (ksz_is_ksz88x3(dev)) {
+ restart_mask |= KSZ8873_PORT_PHY_LOOPBACK;
+
+ if (val & BMCR_LOOPBACK)
+ restart |= KSZ8873_PORT_PHY_LOOPBACK;
+ } else {
+ ret = ksz879x_set_loopback(dev, port, val);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask,
+ restart);
+}
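+
+/* Illustrative walk-through (sketch, not additional driver logic): a MIIM
+ * write of BMCR_ANENABLE | BMCR_ANRESTART through this function, i.e.
+ *
+ *     ksz8_w_phy_bmcr(dev, port, BMCR_ANENABLE | BMCR_ANRESTART);
+ *
+ * results in three register updates per the mapping table above:
+ * P_SPEED_STATUS clears PORT_HP_MDIX (KSZ886X_BMCR_HP_MDIX is not set),
+ * P_FORCE_CTRL enables auto-negotiation (PORT_AUTO_NEG_ENABLE set on
+ * KSZ88x3, PORT_AUTO_NEG_DISABLE cleared otherwise), and
+ * P_NEG_RESTART_CTRL sets PORT_AUTO_NEG_RESTART while clearing the other
+ * restart_mask bits. Loopback is handled separately on KSZ879x via
+ * ksz879x_set_loopback().
+ */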
+
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ const u16 *regs;
+ u8 ctrl, data;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_w_phy_bmcr(dev, p, val);
+ if (ret)
+ return ret;
+ break;
+ case MII_ADVERTISE:
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ data = ctrl;
+ data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
+ PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (val & ADVERTISE_PAUSE_CAP)
+ data |= PORT_AUTO_NEG_SYM_PAUSE;
+ if (val & ADVERTISE_100FULL)
+ data |= PORT_AUTO_NEG_100BTX_FD;
+ if (val & ADVERTISE_100HALF)
+ data |= PORT_AUTO_NEG_100BTX;
+ if (val & ADVERTISE_10FULL)
+ data |= PORT_AUTO_NEG_10BT_FD;
+ if (val & ADVERTISE_10HALF)
+ data |= PORT_AUTO_NEG_10BT;
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
+ break;
+ case PHY_REG_LINK_MD:
+ if (val & PHY_START_CABLE_DIAG)
+ ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL,
+ PORT_START_CABLE_DIAG, true);
+ break;
+
+ case PHY_REG_PHY_CTRL:
+ ret = ksz8_w_phy_ctrl(dev, p, val);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
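+
+/* Usage sketch (assumption about the surrounding glue, not part of this
+ * patch): ksz8_r_phy() and ksz8_w_phy() are expected to back the MDIO
+ * accessors of the common code, so a phylib access such as
+ *
+ *     int bmsr = mdiobus_read(bus, port, MII_BMSR);
+ *
+ * ends up as the per-port switch register reads implemented above rather
+ * than a real MDIO transaction.
+ */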
+
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+{
+ int offset = P_MIRROR_CTRL;
+ u8 data;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_pread8(dev, port, offset, &data);
+ data &= ~dev->port_mask;
+ data |= (member & dev->port_mask);
+ ksz_pwrite8(dev, port, offset, data);
+}
+
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 learn[DSA_MAX_PORTS];
+ int first, index, cnt;
+ const u16 *regs;
+ int reg = S_FLUSH_TABLE_CTRL;
+ int mask = SW_FLUSH_DYN_MAC_TABLE;
+
+ regs = dev->info->regs;
+
+ if ((unsigned int)port < dev->info->port_cnt) {
+ first = port;
+ cnt = port + 1;
+ } else {
+ /* Flush all ports. */
+ first = 0;
+ cnt = dev->info->port_cnt;
+ }
+ for (index = first; index < cnt; index++) {
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
+ learn[index] | PORT_LEARN_DISABLE);
+ }
+ if (ksz_is_ksz8463(dev)) {
+ reg = KSZ8463_FLUSH_TABLE_CTRL;
+ mask = KSZ8463_FLUSH_DYN_MAC_TABLE;
+ }
+ ksz_cfg(dev, reg, mask, true);
+ for (index = first; index < cnt; index++) {
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
+ }
+}
+
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ u8 mac[ETH_ALEN];
+ u8 src_port, fid;
+ u16 entries = 0;
+ int ret, i;
+
+ for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) {
+ ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
+ &entries);
+ if (ret)
+ return ret;
+
+ if (i >= entries)
+ return 0;
+
+ if (port == src_port) {
+ ret = cb(mac, fid, false, data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
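+
+/* Usage sketch (hypothetical callback, for illustration): the dump walks
+ * the dynamic MAC table until the hardware-reported entry count is
+ * reached and invokes the DSA callback for entries learned on @port:
+ *
+ *     static int print_fdb(const unsigned char *addr, u16 vid,
+ *                          bool is_static, void *data)
+ *     {
+ *             pr_info("%pM vid %u %s\n", addr, vid,
+ *                     is_static ? "static" : "dynamic");
+ *             return 0;
+ *     }
+ *
+ *     ksz8_fdb_dump(dev, port, print_fdb, NULL);
+ */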
+
+static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid) {
+ /* Remember the first empty entry, stored as index + 1 so
+ * that zero means none has been found.
+ */
+ if (!empty)
+ empty = index + 1;
+ continue;
+ }
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = vid;
+ }
+
+ return ksz8_w_sta_mac_table(dev, index, &alu);
+}
+
+static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid)
+ continue;
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics)
+ return 0;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+
+ return ksz8_w_sta_mac_table(dev, index, &alu);
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, addr, vid);
+}
+
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, addr, vid);
+}
+
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return -ENOTSUPP;
+
+ /* Discard packets with VID not enabled on the switch */
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
+
+ /* Discard packets with VID not enabled on the ingress port */
+ for (port = 0; port < dev->phy_port_cnt; ++port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
+ flag);
+
+ return 0;
+}
+
+static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
+{
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ int reg = REG_SW_INSERT_SRC_PVID;
+
+ if (ksz_is_ksz8463(dev))
+ reg = KSZ8463_REG_SW_CTRL_9;
+ ksz_cfg(dev, reg, 0x03 << (4 - 2 * port), state);
+ } else {
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
+ }
+}
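+
+/* Worked example for the shift arithmetic above: the PVID insert field is
+ * two bits per port, packed from the top of the register down, so
+ * 0x03 << (4 - 2 * port) selects bits 5:4 for port 0, bits 3:2 for port 1
+ * and bits 1:0 for port 2.
+ */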
+
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_port *p = &dev->ports[port];
+ u16 data, new_pvid = 0;
+ u8 fid, member, valid;
+
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return -ENOTSUPP;
+
+ /* If a VLAN is added with untagged flag different from the
+ * port's Remove Tag flag, we need to change the latter.
+ * Ignore VID 0, which is always untagged.
+ * Ignore CPU port, which will always be tagged.
+ */
+ if (untagged != p->remove_tag && vlan->vid != 0 &&
+ port != dev->cpu_port) {
+ unsigned int vid;
+
+ /* Reject attempts to add a VLAN that requires the
+ * Remove Tag flag to be changed, unless there are no
+ * other VLANs currently configured.
+ */
+ for (vid = 1; vid < dev->info->num_vlans; ++vid) {
+ /* Skip the VID we are going to add or reconfigure */
+ if (vid == vlan->vid)
+ continue;
+
+ ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
+ &fid, &member, &valid);
+ if (valid && (member & BIT(port)))
+ return -EINVAL;
+ }
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+ p->remove_tag = untagged;
+ }
+
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
+
+ /* First time to setup the VLAN entry. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
+
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vlan->vid;
+
+ if (new_pvid) {
+ u16 vid;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
+ vid &= ~VLAN_VID_MASK;
+ vid |= new_pvid;
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+
+ ksz8_port_enable_pvid(dev, port, true);
+ }
+
+ return 0;
+}
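+
+/* Usage sketch (illustrative only; "lan1"/"lan2" are hypothetical port
+ * names): the Remove Tag restriction above is what a user hits when
+ * mixing tagged and untagged VLANs on one port, e.g. with iproute2:
+ *
+ *     bridge vlan add dev lan1 vid 10 pvid untagged
+ *     bridge vlan add dev lan2 vid 10
+ *     bridge vlan add dev lan1 vid 20
+ *
+ * The last command fails with -EINVAL because vid 10 is already
+ * configured untagged on lan1.
+ */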
+
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ u16 data, pvid;
+ u8 fid, member, valid;
+
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return -ENOTSUPP;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ ksz8_r_vlan_table(dev, vlan->vid, &data);
+ ksz8_from_vlan(dev, data, &fid, &member, &valid);
+
+ member &= ~BIT(port);
+
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
+
+ ksz8_to_vlan(dev, fid, member, valid, &data);
+ ksz8_w_vlan_table(dev, vlan->vid, data);
+
+ if (pvid == vlan->vid)
+ ksz8_port_enable_pvid(dev, port, false);
+
+ return 0;
+}
+
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ int offset = P_MIRROR_CTRL;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ if (ingress) {
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, true);
+ dev->mirror_rx |= BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, true);
+ dev->mirror_tx |= BIT(port);
+ }
+
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ if (dev->mirror_rx || dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
+ PORT_MIRROR_SNIFFER, true);
+
+ return 0;
+}
+
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ int offset = P_MIRROR_CTRL;
+ u8 data;
+
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ if (mirror->ingress) {
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_RX, false);
+ dev->mirror_rx &= ~BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, offset, PORT_MIRROR_TX, false);
+ dev->mirror_tx &= ~BIT(port);
+ }
+
+ ksz_pread8(dev, port, offset, &data);
+
+ if (!dev->mirror_rx && !dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, offset,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (!ksz_is_ksz87xx(dev))
+ return;
+
+ if (!p->interface && dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ port);
+ p->interface = dev->compat_interface;
+ }
+}
+
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ const u32 *masks;
+ int offset;
+ u8 member;
+
+ masks = dev->info->masks;
+
+ /* enable broadcast storm limit */
+ offset = P_BCAST_STORM_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR1;
+ ksz_port_cfg(dev, port, offset, PORT_BROADCAST_STORM, true);
+
+ ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
+
+ /* replace priority */
+ offset = P_802_1P_CTRL;
+ if (ksz_is_ksz8463(dev))
+ offset = P1CR2;
+ ksz_port_cfg(dev, port, offset,
+ masks[PORT_802_1P_REMAPPING], false);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ ksz8_cfg_port_member(dev, port, member);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ * CPU port 4 has no WoL functionality.
+ */
+ if (ksz_is_ksz87xx(dev) && !cpu_port)
+ ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
+}
+
+static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
+{
+ struct dsa_port *cpu_dp = dsa_to_port(dev->ds, dev->cpu_port);
+ bool rmii_clk_internal;
+
+ if (!ksz_is_ksz88x3(dev))
+ return;
+
+ rmii_clk_internal = of_property_read_bool(cpu_dp->dn,
+ "microchip,rmii-clk-internal");
+
+ ksz_cfg(dev, KSZ88X3_REG_FVID_AND_HOST_MODE,
+ KSZ88X3_PORT3_RMII_CLK_INTERNAL, rmii_clk_internal);
+}
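+
+/* Devicetree sketch (hypothetical node, for illustration; "&fec1" is an
+ * assumed host MAC): the RMII reference clock source checked above is
+ * selected on the CPU port node:
+ *
+ *     port@2 {
+ *             reg = <2>;
+ *             label = "cpu";
+ *             ethernet = <&fec1>;
+ *             phy-mode = "rmii";
+ *             microchip,rmii-clk-internal;
+ *     };
+ *
+ * Without the property the switch expects an externally supplied RMII
+ * clock.
+ */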
+
+void ksz8_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u32 *masks;
+ const u16 *regs;
+ u8 remote;
+ u8 fiber_ports = 0;
+ int i;
+
+ masks = dev->info->masks;
+ regs = dev->info->regs;
+
+ ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
+
+ ksz8_port_setup(dev, dev->cpu_port, true);
+
+ ksz8795_cpu_interface_select(dev, dev->cpu_port);
+ ksz88x3_config_rmii_clk(dev);
+
+ for (i = 0; i < dev->phy_port_cnt; i++)
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ for (i = 0; i < dev->phy_port_cnt; i++) {
+ p = &dev->ports[i];
+
+ /* For KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
+ ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
+ if (remote & KSZ8_PORT_FIBER_MODE)
+ p->fiber = 1;
+ }
+ if (p->fiber)
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
+ else
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
+ if (p->fiber)
+ fiber_ports |= (1 << i);
+ }
+ if (ksz_is_ksz8463(dev)) {
+ /* Setup fiber ports. */
+ if (fiber_ports) {
+ fiber_ports &= 3;
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_CFG_CTRL,
+ fiber_ports << PORT_COPPER_MODE_S,
+ 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_REG_DSP_CTRL_6,
+ COPPER_RECEIVE_ADJUSTMENT, 0);
+ }
+
+ /* Turn off PTP function as the switch's proprietary way of
+ * handling timestamp is not supported in current Linux PTP
+ * stack implementation.
+ */
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_MSG_CONF1,
+ PTP_ENABLE, 0);
+ regmap_update_bits(ksz_regmap_16(dev),
+ KSZ8463_PTP_CLK_CTRL,
+ PTP_CLK_ENABLE, 0);
+ }
+}
+
+/**
+ * ksz8_phy_port_link_up - Configures ports with integrated PHYs
+ * @dev: The KSZ device instance.
+ * @port: The port number to configure.
+ * @duplex: The desired duplex mode.
+ * @tx_pause: If true, enables transmit pause.
+ * @rx_pause: If true, enables receive pause.
+ *
+ * Description:
+ * The function configures flow control settings for a given port based on the
+ * desired settings and current duplex mode.
+ *
+ * According to the KSZ8873 datasheet, the PORT_FORCE_FLOW_CTRL bit in the
+ * Port Control 2 register (0x1A for Port 1, 0x22 for Port 2, 0x32 for Port 3)
+ * determines how flow control is handled on the port:
+ * "1 = will always enable full-duplex flow control on the port, regardless
+ * of AN result.
+ * 0 = full-duplex flow control is enabled based on AN result."
+ *
+ * This means that the flow control behavior depends on the state of this bit:
+ * - If PORT_FORCE_FLOW_CTRL is set to 1, the switch will ignore AN results and
+ * force flow control on the port.
+ * - If PORT_FORCE_FLOW_CTRL is set to 0, the switch will enable or disable
+ * flow control based on the AN results.
+ *
+ * However, there is a potential limitation in this configuration. It is
+ * currently not possible to force disable flow control on a port if we still
+ * advertise pause support. While such a configuration is not currently
+ * supported by Linux, and may not make practical sense, it's important to be
+ * aware of this limitation when working with the KSZ8873 and similar devices.
+ */
+static void ksz8_phy_port_link_up(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u16 *regs = dev->info->regs;
+ u8 sctrl = 0;
+
+ /* The KSZ8795 switch differs from the KSZ8873 by supporting
+ * asymmetric pause control. However, since a single bit is used to
+ * control both RX and TX pause, we can't enforce asymmetric pause
+ * control - both TX and RX pause will be either enabled or disabled
+ * together.
+ *
+ * If auto-negotiation is enabled, we usually allow the flow control to
+ * be determined by the auto-negotiation process based on the
+ * capabilities of both link partners. However, for KSZ8873, the
+ * PORT_FORCE_FLOW_CTRL bit may be set by the hardware bootstrap,
+ * ignoring the auto-negotiation result. Thus, even in auto-negotiation
+ * mode, we need to ensure that the PORT_FORCE_FLOW_CTRL bit is
+ * properly cleared.
+ *
+ * In the absence of pause auto-negotiation, we will enforce symmetric
+ * pause control for both variants of switches - KSZ8873 and KSZ8795.
+ *
+	 * Autoneg   Pause Autoneg   rx,tx   PORT_FORCE_FLOW_CTRL
+	 *    1            1           x     0
+	 *    0            1           x     0 (flow control probably disabled)
+	 *    x            0           1     1 (flow control force enabled)
+	 *    1            0           0     0 (flow control still depends on
+	 *                                      aneg result due to hardware)
+	 *    0            0           0     0 (flow control probably disabled)
+ */
+ if (dev->ports[port].manual_flow && tx_pause)
+ sctrl |= PORT_FORCE_FLOW_CTRL;
+
+ ksz_prmw8(dev, port, regs[P_STP_CTRL], PORT_FORCE_FLOW_CTRL, sctrl);
+}
+
+/**
+ * ksz8_cpu_port_link_up - Configures the CPU port of the switch.
+ * @dev: The KSZ device instance.
+ * @speed: The desired link speed.
+ * @duplex: The desired duplex mode.
+ * @tx_pause: If true, enables transmit pause.
+ * @rx_pause: If true, enables receive pause.
+ *
+ * Description:
+ * The function configures flow control and speed settings for the CPU
+ * port of the switch based on the desired settings, current duplex mode, and
+ * speed.
+ */
+static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u16 *regs = dev->info->regs;
+ u8 ctrl = 0;
+
+ /* SW_FLOW_CTRL, SW_HALF_DUPLEX, and SW_10_MBIT bits are bootstrappable
+ * at least on KSZ8873. They can have different values depending on your
+ * board setup.
+ */
+ if (tx_pause || rx_pause)
+ ctrl |= SW_FLOW_CTRL;
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= SW_HALF_DUPLEX;
+
+ /* This hardware only supports SPEED_10 and SPEED_100. For SPEED_10
+ * we need to set the SW_10_MBIT bit. Otherwise, we can leave it 0.
+ */
+ if (speed == SPEED_10)
+ ctrl |= SW_10_MBIT;
+
+ ksz_rmw8(dev, regs[S_BROADCAST_CTRL], SW_HALF_DUPLEX | SW_FLOW_CTRL |
+ SW_10_MBIT, ctrl);
+}
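+
+/* Worked example for the translation above (sketch): a 100 Mbit/s
+ * full-duplex CPU link with pause enabled,
+ *
+ *     ksz8_cpu_port_link_up(dev, SPEED_100, DUPLEX_FULL, true, true);
+ *
+ * writes ctrl == SW_FLOW_CTRL, clearing SW_HALF_DUPLEX and SW_10_MBIT in
+ * regs[S_BROADCAST_CTRL].
+ */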
+
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+
+ /* If the port is the CPU port, apply special handling. Only the CPU
+ * port is configured via global registers.
+ */
+ if (dev->cpu_port == port)
+ ksz8_cpu_port_link_up(dev, speed, duplex, tx_pause, rx_pause);
+ else if (dev->info->internal_phy[port])
+ ksz8_phy_port_link_up(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->info->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
+int ksz8_enable_stp_addr(struct ksz_device *dev)
+{
+ struct alu_struct alu;
+
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
+
+ return ksz8_w_sta_mac_table(dev, 0, &alu);
+}
+
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ int i, ret = 0;
+
+ ds->mtu_enforcement_ingress = true;
+
+ /* We rely on software untagging on the CPU port, so that we
+ * can support both tagged and untagged VLANs
+ */
+ ds->untag_bridge_pvid = true;
+
+ /* VLAN filtering is partly controlled by the global VLAN
+ * Enable flag
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
+
+ /* Make sure unicast VLAN boundary is set as default and that
+ * frames are not dropped after excessive collisions.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ if (!ksz_is_ksz88x3(dev) && !ksz_is_ksz8463(dev))
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
+
+ for (i = 0; i < (dev->info->num_vlans / 4); i++)
+ ksz8_r_vlan_entries(dev, i);
+
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME event changes before shutdown. PME is only
+ * available on the KSZ87xx family.
+ */
+ if (ksz_is_ksz87xx(dev)) {
+ ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0);
+ if (!ret)
+ ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0);
+ }
+
+ if (ret)
+ return ret;
+
+ return ksz8_handle_global_errata(ds);
+}
+
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_10 | MAC_100;
+
+ /* Silicon Errata Sheet (DS80000830A):
+ * "Port 1 does not respond to received flow control PAUSE frames"
+ * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
+ * switches.
+ */
+ if (!ksz_is_ksz88x3(dev) || port)
+ config->mac_capabilities |= MAC_SYM_PAUSE;
+
+ /* Asym pause is not supported on KSZ8863 and KSZ8873 */
+ if (!ksz_is_ksz88x3(dev))
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
+}
+
+u32 ksz8_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+u32 ksz8463_get_port_addr(int port, int offset)
+{
+ return offset + 0x18 * port;
+}
+
+static u16 ksz8463_get_phy_addr(u16 phy, u16 reg, u16 offset)
+{
+ return offset + reg * 2 + phy * (P2MBCR - P1MBCR);
+}
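+
+/* Worked example for the address mapping above (symbolic, since the
+ * register values are defined elsewhere): reading MII_LPA (reg 5) of
+ * PHY 1 resolves to
+ *
+ *     sw_reg = P1MBCR + MII_LPA * 2 + 1 * (P2MBCR - P1MBCR)
+ *            = P2MBCR + MII_LPA * 2
+ *
+ * i.e. the LPA mirror in the second PHY's register block; the factor of
+ * two reflects the 16-bit register stride.
+ */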
+
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u16 sw_reg = 0;
+ u16 data = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+ switch (reg) {
+ case MII_PHYSID1:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1IHR);
+ break;
+ case MII_PHYSID2:
+ sw_reg = ksz8463_get_phy_addr(phy, 0, PHY1ILR);
+ break;
+ case MII_BMCR:
+ case MII_BMSR:
+ case MII_ADVERTISE:
+ case MII_LPA:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ case MII_TPISTATUS:
+ /* This register holds the PHY interrupt status for simulated
+ * Micrel KSZ PHY.
+ */
+ data = 0x0505;
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_read16(dev, sw_reg, &data);
+ if (ret)
+ return ret;
+ }
+ *val = data;
+
+ return 0;
+}
+
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sw_reg = 0;
+ int ret;
+
+ if (phy > 1)
+ return -ENOSPC;
+
+ /* No write to fiber port. */
+ if (dev->ports[phy].fiber)
+ return 0;
+ switch (reg) {
+ case MII_BMCR:
+ case MII_ADVERTISE:
+ sw_reg = ksz8463_get_phy_addr(phy, reg, P1MBCR);
+ break;
+ default:
+ break;
+ }
+ if (sw_reg) {
+ ret = ksz_write16(dev, sw_reg, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ksz8_switch_init(struct ksz_device *dev)
+{
+ dev->cpu_port = fls(dev->info->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->info->port_cnt - 1;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
+
+ return 0;
+}
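+
+/* Worked example (hypothetical 5-port, one-CPU-port device, for
+ * illustration): with port_cnt = 5 and cpu_ports = BIT(4),
+ *
+ *     cpu_port     = fls(BIT(4)) - 1       = 4
+ *     phy_port_cnt = 5 - 1                 = 4
+ *     port_mask    = (BIT(4) - 1) | BIT(4) = 0x1f
+ */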
+
+void ksz8_switch_exit(struct ksz_device *dev)
+{
+ ksz8_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 9d611895d3cf..0f2cd1474b44 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -7,63 +7,64 @@
#ifndef __KSZ8XXX_H
#define __KSZ8XXX_H
-#include <linux/kernel.h>
-enum ksz_regs {
- REG_IND_CTRL_0,
- REG_IND_DATA_8,
- REG_IND_DATA_CHECK,
- REG_IND_DATA_HI,
- REG_IND_DATA_LO,
- REG_IND_MIB_CHECK,
- P_FORCE_CTRL,
- P_LINK_STATUS,
- P_LOCAL_CTRL,
- P_NEG_RESTART_CTRL,
- P_REMOTE_STATUS,
- P_SPEED_STATUS,
- S_TAIL_TAG_CTRL,
-};
+#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
-enum ksz_masks {
- PORT_802_1P_REMAPPING,
- SW_TAIL_TAG_ENABLE,
- MIB_COUNTER_OVERFLOW,
- MIB_COUNTER_VALID,
- VLAN_TABLE_FID,
- VLAN_TABLE_MEMBERSHIP,
- VLAN_TABLE_VALID,
- STATIC_MAC_TABLE_VALID,
- STATIC_MAC_TABLE_USE_FID,
- STATIC_MAC_TABLE_FID,
- STATIC_MAC_TABLE_OVERRIDE,
- STATIC_MAC_TABLE_FWD_PORTS,
- DYNAMIC_MAC_TABLE_ENTRIES_H,
- DYNAMIC_MAC_TABLE_MAC_EMPTY,
- DYNAMIC_MAC_TABLE_NOT_READY,
- DYNAMIC_MAC_TABLE_ENTRIES,
- DYNAMIC_MAC_TABLE_FID,
- DYNAMIC_MAC_TABLE_SRC_PORT,
- DYNAMIC_MAC_TABLE_TIMESTAMP,
-};
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value);
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data);
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data);
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause);
+int ksz8_all_queues_split(struct ksz_device *dev, int queues);
-enum ksz_shifts {
- VLAN_TABLE_MEMBERSHIP_S,
- VLAN_TABLE,
- STATIC_MAC_FWD_PORTS,
- STATIC_MAC_FID,
- DYNAMIC_MAC_ENTRIES_H,
- DYNAMIC_MAC_ENTRIES,
- DYNAMIC_MAC_FID,
- DYNAMIC_MAC_TIMESTAMP,
- DYNAMIC_MAC_SRC_PORT,
-};
-
-struct ksz8 {
- const u8 *regs;
- const u32 *masks;
- const u8 *shifts;
- void *priv;
-};
+u32 ksz8463_get_port_addr(int port, int offset);
+int ksz8463_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8463_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
deleted file mode 100644
index 43fc3087aeb3..000000000000
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ /dev/null
@@ -1,1806 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ8795 switch driver
- *
- * Copyright (C) 2017 Microchip Technology Inc.
- * Tristram Ha <Tristram.Ha@microchip.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/gpio.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_data/microchip-ksz.h>
-#include <linux/phy.h>
-#include <linux/etherdevice.h>
-#include <linux/if_bridge.h>
-#include <linux/micrel_phy.h>
-#include <net/dsa.h>
-#include <net/switchdev.h>
-#include <linux/phylink.h>
-
-#include "ksz_common.h"
-#include "ksz8795_reg.h"
-#include "ksz8.h"
-
-static const u8 ksz8795_regs[] = {
- [REG_IND_CTRL_0] = 0x6E,
- [REG_IND_DATA_8] = 0x70,
- [REG_IND_DATA_CHECK] = 0x72,
- [REG_IND_DATA_HI] = 0x71,
- [REG_IND_DATA_LO] = 0x75,
- [REG_IND_MIB_CHECK] = 0x74,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x07,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x08,
- [P_SPEED_STATUS] = 0x09,
- [S_TAIL_TAG_CTRL] = 0x0C,
-};
-
-static const u32 ksz8795_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(7),
- [SW_TAIL_TAG_ENABLE] = BIT(1),
- [MIB_COUNTER_OVERFLOW] = BIT(6),
- [MIB_COUNTER_VALID] = BIT(5),
- [VLAN_TABLE_FID] = GENMASK(6, 0),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
- [VLAN_TABLE_VALID] = BIT(12),
- [STATIC_MAC_TABLE_VALID] = BIT(21),
- [STATIC_MAC_TABLE_USE_FID] = BIT(23),
- [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
-};
-
-static const u8 ksz8795_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 7,
- [VLAN_TABLE] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 24,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 29,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 27,
- [DYNAMIC_MAC_SRC_PORT] = 24,
-};
-
-static const u8 ksz8863_regs[] = {
- [REG_IND_CTRL_0] = 0x79,
- [REG_IND_DATA_8] = 0x7B,
- [REG_IND_DATA_CHECK] = 0x7B,
- [REG_IND_DATA_HI] = 0x7C,
- [REG_IND_DATA_LO] = 0x80,
- [REG_IND_MIB_CHECK] = 0x80,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x0C,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x0E,
- [P_SPEED_STATUS] = 0x0F,
- [S_TAIL_TAG_CTRL] = 0x03,
-};
-
-static const u32 ksz8863_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(3),
- [SW_TAIL_TAG_ENABLE] = BIT(6),
- [MIB_COUNTER_OVERFLOW] = BIT(7),
- [MIB_COUNTER_VALID] = BIT(6),
- [VLAN_TABLE_FID] = GENMASK(15, 12),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
- [VLAN_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_USE_FID] = BIT(21),
- [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
-};
-
-static u8 ksz8863_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 22,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 24,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 24,
- [DYNAMIC_MAC_SRC_PORT] = 20,
-};
-
-struct mib_names {
- char string[ETH_GSTRING_LEN];
-};
-
-static const struct mib_names ksz87xx_mib_names[] = {
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "rx_1523_2000" },
- { "rx_2001" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_total" },
- { "tx_total" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static const struct mib_names ksz88xx_mib_names[] = {
- { "rx" },
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "tx" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static bool ksz_is_ksz88x3(struct ksz_device *dev)
-{
- return dev->chip_id == 0x8830;
-}
-
-static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
-{
- regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
-}
-
-static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
- bool set)
-{
- regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
- bits, set ? bits : 0);
-}
-
-static int ksz8_reset_switch(struct ksz_device *dev)
-{
- if (ksz_is_ksz88x3(dev)) {
- /* reset switch */
- ksz_cfg(dev, KSZ8863_REG_SW_RESET,
- KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, true);
- ksz_cfg(dev, KSZ8863_REG_SW_RESET,
- KSZ8863_GLOBAL_SOFTWARE_RESET | KSZ8863_PCS_RESET, false);
- } else {
- /* reset switch */
- ksz_write8(dev, REG_POWER_MANAGEMENT_1,
- SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
- ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
- }
-
- return 0;
-}
-
-static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
-{
- u8 hi, lo;
-
- /* Number of queues can only be 1, 2, or 4. */
- switch (queue) {
- case 4:
- case 3:
- queue = PORT_QUEUE_SPLIT_4;
- break;
- case 2:
- queue = PORT_QUEUE_SPLIT_2;
- break;
- default:
- queue = PORT_QUEUE_SPLIT_1;
- }
- ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
- ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
- lo &= ~PORT_QUEUE_SPLIT_L;
- if (queue & PORT_QUEUE_SPLIT_2)
- lo |= PORT_QUEUE_SPLIT_L;
- hi &= ~PORT_QUEUE_SPLIT_H;
- if (queue & PORT_QUEUE_SPLIT_4)
- hi |= PORT_QUEUE_SPLIT_H;
- ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
- ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
-
- /* Default is port based for egress rate limit. */
- if (queue != PORT_QUEUE_SPLIT_1)
- ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
- true);
-}
-
-static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u32 *masks;
- const u8 *regs;
- u16 ctrl_addr;
- u32 data;
- u8 check;
- int loop;
-
- masks = ksz8->masks;
- regs = ksz8->regs;
-
- ctrl_addr = addr + dev->reg_mib_cnt * port;
- ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
-
- mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
-
- /* It is almost guaranteed to always read the valid bit because of
- * slow SPI speed.
- */
- for (loop = 2; loop > 0; loop--) {
- ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
-
- if (check & masks[MIB_COUNTER_VALID]) {
- ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
- if (check & masks[MIB_COUNTER_OVERFLOW])
- *cnt += MIB_COUNTER_VALUE + 1;
- *cnt += data & MIB_COUNTER_VALUE;
- break;
- }
- }
- mutex_unlock(&dev->alu_mutex);
-}
-
-static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u32 *masks;
- const u8 *regs;
- u16 ctrl_addr;
- u32 data;
- u8 check;
- int loop;
-
- masks = ksz8->masks;
- regs = ksz8->regs;
-
- addr -= dev->reg_mib_cnt;
- ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
- ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
- ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
-
- mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
-
- /* It is almost guaranteed to always read the valid bit because of
- * slow SPI speed.
- */
- for (loop = 2; loop > 0; loop--) {
- ksz_read8(dev, regs[REG_IND_MIB_CHECK], &check);
-
- if (check & masks[MIB_COUNTER_VALID]) {
- ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
- if (addr < 2) {
- u64 total;
-
- total = check & MIB_TOTAL_BYTES_H;
- total <<= 32;
- *cnt += total;
- *cnt += data;
- if (check & masks[MIB_COUNTER_OVERFLOW]) {
- total = MIB_TOTAL_BYTES_H + 1;
- total <<= 32;
- *cnt += total;
- }
- } else {
- if (check & masks[MIB_COUNTER_OVERFLOW])
- *cnt += MIB_PACKET_DROPPED + 1;
- *cnt += data & MIB_PACKET_DROPPED;
- }
- break;
- }
- }
- mutex_unlock(&dev->alu_mutex);
-}
-
-static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
- u32 *last = (u32 *)dropped;
- u16 ctrl_addr;
- u32 data;
- u32 cur;
-
- addr -= dev->reg_mib_cnt;
- ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
- KSZ8863_MIB_PACKET_DROPPED_RX_0;
- ctrl_addr += port;
- ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
-
- mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- ksz_read32(dev, regs[REG_IND_DATA_LO], &data);
- mutex_unlock(&dev->alu_mutex);
-
- data &= MIB_PACKET_DROPPED;
- cur = last[addr];
- if (data != cur) {
- last[addr] = data;
- if (data < cur)
- data += MIB_PACKET_DROPPED + 1;
- data -= cur;
- *cnt += data;
- }
-}
-
-static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
-{
- if (ksz_is_ksz88x3(dev))
- ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
- else
- ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
-}
-
-static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
-{
- if (ksz_is_ksz88x3(dev))
- return;
-
- /* enable the port for flush/freeze function */
- if (freeze)
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
- ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
-
- /* disable the port after freeze is done */
- if (!freeze)
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
-}
-
-static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
-{
- struct ksz_port_mib *mib = &dev->ports[port].mib;
- u64 *dropped;
-
- if (!ksz_is_ksz88x3(dev)) {
- /* flush all enabled port MIB counters */
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
- ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
- ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
- }
-
- mib->cnt_ptr = 0;
-
- /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
- dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
- &mib->counters[mib->cnt_ptr]);
- ++mib->cnt_ptr;
- }
-
- /* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
-
- /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
- dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
- dropped, &mib->counters[mib->cnt_ptr]);
- ++mib->cnt_ptr;
- }
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
-}
-
-static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
- u16 ctrl_addr;
-
- ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
-
- mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- ksz_read64(dev, regs[REG_IND_DATA_HI], data);
- mutex_unlock(&dev->alu_mutex);
-}
-
-static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
- u16 ctrl_addr;
-
- ctrl_addr = IND_ACC_TABLE(table) | addr;
-
- mutex_lock(&dev->alu_mutex);
- ksz_write64(dev, regs[REG_IND_DATA_HI], data);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- mutex_unlock(&dev->alu_mutex);
-}
-
-static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
-{
- struct ksz8 *ksz8 = dev->priv;
- int timeout = 100;
- const u32 *masks;
- const u8 *regs;
-
- masks = ksz8->masks;
- regs = ksz8->regs;
-
- do {
- ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
- timeout--;
- } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
-
- /* Entry is not ready for accessing. */
- if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
- return -EAGAIN;
- /* Entry is ready for accessing. */
- } else {
- ksz_read8(dev, regs[REG_IND_DATA_8], data);
-
- /* There is no valid entry in the table. */
- if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
- return -ENXIO;
- }
- return 0;
-}
-
-static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
-{
- struct ksz8 *ksz8 = dev->priv;
- u32 data_hi, data_lo;
- const u8 *shifts;
- const u32 *masks;
- const u8 *regs;
- u16 ctrl_addr;
- u8 data;
- int rc;
-
- shifts = ksz8->shifts;
- masks = ksz8->masks;
- regs = ksz8->regs;
-
- ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
-
- mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
-
- rc = ksz8_valid_dyn_entry(dev, &data);
- if (rc == -EAGAIN) {
- if (addr == 0)
- *entries = 0;
- } else if (rc == -ENXIO) {
- *entries = 0;
- /* At least one valid entry in the table. */
- } else {
- u64 buf = 0;
- int cnt;
-
- ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
- data_hi = (u32)(buf >> 32);
- data_lo = (u32)buf;
-
- /* Check out how many valid entry in the table. */
- cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
- cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
- cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
- shifts[DYNAMIC_MAC_ENTRIES];
- *entries = cnt + 1;
-
- *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
- shifts[DYNAMIC_MAC_FID];
- *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
- shifts[DYNAMIC_MAC_SRC_PORT];
- *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
- shifts[DYNAMIC_MAC_TIMESTAMP];
-
- mac_addr[5] = (u8)data_lo;
- mac_addr[4] = (u8)(data_lo >> 8);
- mac_addr[3] = (u8)(data_lo >> 16);
- mac_addr[2] = (u8)(data_lo >> 24);
-
- mac_addr[1] = (u8)data_hi;
- mac_addr[0] = (u8)(data_hi >> 8);
- rc = 0;
- }
- mutex_unlock(&dev->alu_mutex);
-
- return rc;
-}
-
-static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
-{
- struct ksz8 *ksz8 = dev->priv;
- u32 data_hi, data_lo;
- const u8 *shifts;
- const u32 *masks;
- u64 data;
-
- shifts = ksz8->shifts;
- masks = ksz8->masks;
-
- ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
- data_hi = data >> 32;
- data_lo = (u32)data;
- if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
- masks[STATIC_MAC_TABLE_OVERRIDE])) {
- alu->mac[5] = (u8)data_lo;
- alu->mac[4] = (u8)(data_lo >> 8);
- alu->mac[3] = (u8)(data_lo >> 16);
- alu->mac[2] = (u8)(data_lo >> 24);
- alu->mac[1] = (u8)data_hi;
- alu->mac[0] = (u8)(data_hi >> 8);
- alu->port_forward =
- (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
- shifts[STATIC_MAC_FWD_PORTS];
- alu->is_override =
- (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- data_hi >>= 1;
- alu->is_static = true;
- alu->is_use_fid =
- (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
- alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
- shifts[STATIC_MAC_FID];
- return 0;
- }
- return -ENXIO;
-}
-
-static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
-{
- struct ksz8 *ksz8 = dev->priv;
- u32 data_hi, data_lo;
- const u8 *shifts;
- const u32 *masks;
- u64 data;
-
- shifts = ksz8->shifts;
- masks = ksz8->masks;
-
- data_lo = ((u32)alu->mac[2] << 24) |
- ((u32)alu->mac[3] << 16) |
- ((u32)alu->mac[4] << 8) | alu->mac[5];
- data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
- data_hi |= (u32)alu->port_forward << shifts[STATIC_MAC_FWD_PORTS];
-
- if (alu->is_override)
- data_hi |= masks[STATIC_MAC_TABLE_OVERRIDE];
- if (alu->is_use_fid) {
- data_hi |= masks[STATIC_MAC_TABLE_USE_FID];
- data_hi |= (u32)alu->fid << shifts[STATIC_MAC_FID];
- }
- if (alu->is_static)
- data_hi |= masks[STATIC_MAC_TABLE_VALID];
- else
- data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
-
- data = (u64)data_hi << 32 | data_lo;
- ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
-}
-
-static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
- u8 *member, u8 *valid)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *shifts;
- const u32 *masks;
-
- shifts = ksz8->shifts;
- masks = ksz8->masks;
-
- *fid = vlan & masks[VLAN_TABLE_FID];
- *member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
- shifts[VLAN_TABLE_MEMBERSHIP_S];
- *valid = !!(vlan & masks[VLAN_TABLE_VALID]);
-}
-
-static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
- u16 *vlan)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *shifts;
- const u32 *masks;
-
- shifts = ksz8->shifts;
- masks = ksz8->masks;
-
- *vlan = fid;
- *vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
- if (valid)
- *vlan |= masks[VLAN_TABLE_VALID];
-}
-
-static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
-{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *shifts;
- u64 data;
- int i;
-
- shifts = ksz8->shifts;
-
- ksz8_r_table(dev, TABLE_VLAN, addr, &data);
- addr *= 4;
- for (i = 0; i < 4; i++) {
- dev->vlan_cache[addr + i].table[0] = (u16)data;
- data >>= shifts[VLAN_TABLE];
- }
-}
-
-static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
-{
- int index;
- u16 *data;
- u16 addr;
- u64 buf;
-
- data = (u16 *)&buf;
- addr = vid / 4;
- index = vid & 3;
- ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
- *vlan = data[index];
-}
-
-static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
-{
- int index;
- u16 *data;
- u16 addr;
- u64 buf;
-
- data = (u16 *)&buf;
- addr = vid / 4;
- index = vid & 3;
- ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
- data[index] = vlan;
- dev->vlan_cache[vid].table[0] = vlan;
- ksz8_w_table(dev, TABLE_VLAN, addr, buf);
-}
-
-static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
-{
- struct ksz8 *ksz8 = dev->priv;
- u8 restart, speed, ctrl, link;
- const u8 *regs = ksz8->regs;
- int processed = true;
- u8 val1, val2;
- u16 data = 0;
- u8 p = phy;
-
- switch (reg) {
- case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (restart & PORT_PHY_LOOPBACK)
- data |= BMCR_LOOPBACK;
- if (ctrl & PORT_FORCE_100_MBIT)
- data |= BMCR_SPEED100;
- if (ksz_is_ksz88x3(dev)) {
- if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= BMCR_ANENABLE;
- } else {
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= BMCR_ANENABLE;
- }
- if (restart & PORT_POWER_DOWN)
- data |= BMCR_PDOWN;
- if (restart & PORT_AUTO_NEG_RESTART)
- data |= BMCR_ANRESTART;
- if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= BMCR_FULLDPLX;
- if (speed & PORT_HP_MDIX)
- data |= KSZ886X_BMCR_HP_MDIX;
- if (restart & PORT_FORCE_MDIX)
- data |= KSZ886X_BMCR_FORCE_MDI;
- if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
- if (restart & PORT_TX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
- if (restart & PORT_LED_OFF)
- data |= KSZ886X_BMCR_DISABLE_LED;
- break;
- case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
- data = BMSR_100FULL |
- BMSR_100HALF |
- BMSR_10FULL |
- BMSR_10HALF |
- BMSR_ANEGCAPABLE;
- if (link & PORT_AUTO_NEG_COMPLETE)
- data |= BMSR_ANEGCOMPLETE;
- if (link & PORT_STAT_LINK_GOOD)
- data |= BMSR_LSTATUS;
- break;
- case MII_PHYSID1:
- data = KSZ8795_ID_HI;
- break;
- case MII_PHYSID2:
- if (ksz_is_ksz88x3(dev))
- data = KSZ8863_ID_LO;
- else
- data = KSZ8795_ID_LO;
- break;
- case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
- data = ADVERTISE_CSMA;
- if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
- data |= ADVERTISE_PAUSE_CAP;
- if (ctrl & PORT_AUTO_NEG_100BTX_FD)
- data |= ADVERTISE_100FULL;
- if (ctrl & PORT_AUTO_NEG_100BTX)
- data |= ADVERTISE_100HALF;
- if (ctrl & PORT_AUTO_NEG_10BT_FD)
- data |= ADVERTISE_10FULL;
- if (ctrl & PORT_AUTO_NEG_10BT)
- data |= ADVERTISE_10HALF;
- break;
- case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
- data = LPA_SLCT;
- if (link & PORT_REMOTE_SYM_PAUSE)
- data |= LPA_PAUSE_CAP;
- if (link & PORT_REMOTE_100BTX_FD)
- data |= LPA_100FULL;
- if (link & PORT_REMOTE_100BTX)
- data |= LPA_100HALF;
- if (link & PORT_REMOTE_10BT_FD)
- data |= LPA_10FULL;
- if (link & PORT_REMOTE_10BT)
- data |= LPA_10HALF;
- if (data & ~LPA_SLCT)
- data |= LPA_LPACK;
- break;
- case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
- if (val1 & PORT_START_CABLE_DIAG)
- data |= PHY_START_CABLE_DIAG;
-
- if (val1 & PORT_CABLE_10M_SHORT)
- data |= PHY_CABLE_10M_SHORT;
-
- data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
- FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
-
- data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
- (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
- FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
- break;
- case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
- if (link & PORT_MDIX_STATUS)
- data |= KSZ886X_CTRL_MDIX_STAT;
- break;
- default:
- processed = false;
- break;
- }
- if (processed)
- *val = data;
-}
-
-static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
-{
- struct ksz8 *ksz8 = dev->priv;
- u8 restart, speed, ctrl, data;
- const u8 *regs = ksz8->regs;
- u8 p = phy;
-
- switch (reg) {
- case MII_BMCR:
-
- /* Do not support PHY reset function. */
- if (val & BMCR_RESET)
- break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- data = speed;
- if (val & KSZ886X_BMCR_HP_MDIX)
- data |= PORT_HP_MDIX;
- else
- data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- data = ctrl;
- if (ksz_is_ksz88x3(dev)) {
- if ((val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_ENABLE;
- else
- data &= ~PORT_AUTO_NEG_ENABLE;
- } else {
- if (!(val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
-
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
- }
-
- if (val & BMCR_SPEED100)
- data |= PORT_FORCE_100_MBIT;
- else
- data &= ~PORT_FORCE_100_MBIT;
- if (val & BMCR_FULLDPLX)
- data |= PORT_FORCE_FULL_DUPLEX;
- else
- data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- data = restart;
- if (val & KSZ886X_BMCR_DISABLE_LED)
- data |= PORT_LED_OFF;
- else
- data &= ~PORT_LED_OFF;
- if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
- data |= PORT_TX_DISABLE;
- else
- data &= ~PORT_TX_DISABLE;
- if (val & BMCR_ANRESTART)
- data |= PORT_AUTO_NEG_RESTART;
- else
- data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & BMCR_PDOWN)
- data |= PORT_POWER_DOWN;
- else
- data &= ~PORT_POWER_DOWN;
- if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
- data |= PORT_AUTO_MDIX_DISABLE;
- else
- data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & KSZ886X_BMCR_FORCE_MDI)
- data |= PORT_FORCE_MDIX;
- else
- data &= ~PORT_FORCE_MDIX;
- if (val & BMCR_LOOPBACK)
- data |= PORT_PHY_LOOPBACK;
- else
- data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
- break;
- case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
- data = ctrl;
- data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
- PORT_AUTO_NEG_100BTX_FD |
- PORT_AUTO_NEG_100BTX |
- PORT_AUTO_NEG_10BT_FD |
- PORT_AUTO_NEG_10BT);
- if (val & ADVERTISE_PAUSE_CAP)
- data |= PORT_AUTO_NEG_SYM_PAUSE;
- if (val & ADVERTISE_100FULL)
- data |= PORT_AUTO_NEG_100BTX_FD;
- if (val & ADVERTISE_100HALF)
- data |= PORT_AUTO_NEG_100BTX;
- if (val & ADVERTISE_10FULL)
- data |= PORT_AUTO_NEG_10BT_FD;
- if (val & ADVERTISE_10HALF)
- data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
- break;
- case PHY_REG_LINK_MD:
- if (val & PHY_START_CABLE_DIAG)
- ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true);
- break;
- default:
- break;
- }
-}
-
-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- struct ksz_device *dev = ds->priv;
-
- /* ksz88x3 uses the same tag schema as KSZ9893 */
- return ksz_is_ksz88x3(dev) ?
- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
-}
-
-static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
-{
- /* Silicon Errata Sheet (DS80000830A):
- * Port 1 does not work with LinkMD Cable-Testing.
- * Port 1 does not respond to received PAUSE control frames.
- */
- if (!port)
- return MICREL_KSZ8_P1_ERRATA;
-
- return 0;
-}
-
-static void ksz8_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- struct ksz_device *dev = ds->priv;
- int i;
-
- for (i = 0; i < dev->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN,
- dev->mib_names[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
-{
- u8 data;
-
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- data &= ~PORT_VLAN_MEMBERSHIP;
- data |= (member & dev->port_mask);
- ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
- dev->ports[port].member = member;
-}
-
-static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct ksz_device *dev = ds->priv;
- int forward = dev->member;
- struct ksz_port *p;
- int member = -1;
- u8 data;
-
- p = &dev->ports[port];
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt)
- member = 0;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- /* Port is a member of a bridge. */
- if (dev->br_member & BIT(port)) {
- dev->member |= BIT(port);
- member = dev->member;
- } else {
- member = dev->host_mask | p->vid_member;
- }
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz8_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & BIT(port))
- dev->member &= ~BIT(port);
- }
-
- /* When the topology has changed, ksz_update_port_member
- * should be called to modify the port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
-}
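
The switch statement above boils down to a small mapping from bridge STP states onto the three per-port control bits; per the register header, PORT_TX_ENABLE, PORT_RX_ENABLE and PORT_LEARN_DISABLE occupy bits 2..0 of the STP control register. A compilable sketch of just that mapping, leaving the membership bookkeeping aside:

#include <stdint.h>
#include <stdio.h>

enum br_state { BR_DISABLED, BR_LISTENING, BR_LEARNING,
                BR_FORWARDING, BR_BLOCKING };

#define TX_ENABLE     (1u << 2)  /* PORT_TX_ENABLE */
#define RX_ENABLE     (1u << 1)  /* PORT_RX_ENABLE */
#define LEARN_DISABLE (1u << 0)  /* PORT_LEARN_DISABLE */

static uint8_t stp_bits(enum br_state state)
{
        switch (state) {
        case BR_DISABLED:   return LEARN_DISABLE;
        case BR_LISTENING:  return RX_ENABLE | LEARN_DISABLE;
        case BR_LEARNING:   return RX_ENABLE;              /* learn, don't forward */
        case BR_FORWARDING: return TX_ENABLE | RX_ENABLE;  /* fully open */
        case BR_BLOCKING:   return LEARN_DISABLE;
        }
        return LEARN_DISABLE;
}

int main(void)
{
        printf("forwarding -> 0x%02x\n", stp_bits(BR_FORWARDING)); /* 0x06 */
        return 0;
}
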
-
-static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
-{
- u8 learn[DSA_MAX_PORTS];
- int first, index, cnt;
- struct ksz_port *p;
-
- if ((uint)port < dev->port_cnt) {
- first = port;
- cnt = port + 1;
- } else {
- /* Flush all ports. */
- first = 0;
- cnt = dev->port_cnt;
- }
- for (index = first; index < cnt; index++) {
- p = &dev->ports[index];
- if (!p->on)
- continue;
- ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
- if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL,
- learn[index] | PORT_LEARN_DISABLE);
- }
- ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- for (index = first; index < cnt; index++) {
- p = &dev->ports[index];
- if (!p->on)
- continue;
- if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
- }
-}
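
ksz8_flush_dyn_mac_table() follows a save/disable/flush/restore sequence: learning is turned off on every affected port before the flush is triggered, so no new dynamic entries can race in, and the saved learning state is restored afterwards. A sketch of the sequence for a single port, with the hardware accesses stubbed out:

#include <stdint.h>
#include <stdio.h>

#define LEARN_DISABLE 0x01u      /* stands in for PORT_LEARN_DISABLE */

static uint8_t stp_ctrl;         /* fake P_STP_CTRL register */

static void write_ctrl(uint8_t val)
{
        stp_ctrl = val;
        printf("P_STP_CTRL <- 0x%02x\n", val);
}

int main(void)
{
        uint8_t saved = stp_ctrl;                  /* 1. save current state */

        if (!(saved & LEARN_DISABLE))
                write_ctrl(saved | LEARN_DISABLE); /* 2. stop learning */

        printf("flush dynamic MAC table\n");       /* 3. trigger the flush */

        if (!(saved & LEARN_DISABLE))
                write_ctrl(saved);                 /* 4. restore learning */
        return 0;
}
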
-
-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
- struct netlink_ext_ack *extack)
-{
- struct ksz_device *dev = ds->priv;
-
- if (ksz_is_ksz88x3(dev))
- return -ENOTSUPP;
-
- /* Discard packets with VID not enabled on the switch */
- ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
-
- /* Discard packets with VID not enabled on the ingress port */
- for (port = 0; port < dev->phy_port_cnt; ++port)
- ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
- flag);
-
- return 0;
-}
-
-static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
-{
- if (ksz_is_ksz88x3(dev)) {
- ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
- 0x03 << (4 - 2 * port), state);
- } else {
- ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
- }
-}
-
-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u16 data, new_pvid = 0;
- u8 fid, member, valid;
-
- if (ksz_is_ksz88x3(dev))
- return -ENOTSUPP;
-
- /* If a VLAN is added with untagged flag different from the
- * port's Remove Tag flag, we need to change the latter.
- * Ignore VID 0, which is always untagged.
- * Ignore CPU port, which will always be tagged.
- */
- if (untagged != p->remove_tag && vlan->vid != 0 &&
- port != dev->cpu_port) {
- unsigned int vid;
-
- /* Reject attempts to add a VLAN that requires the
- * Remove Tag flag to be changed, unless there are no
- * other VLANs currently configured.
- */
- for (vid = 1; vid < dev->num_vlans; ++vid) {
- /* Skip the VID we are going to add or reconfigure */
- if (vid == vlan->vid)
- continue;
-
- ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
- &fid, &member, &valid);
- if (valid && (member & BIT(port)))
- return -EINVAL;
- }
-
- ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
- p->remove_tag = untagged;
- }
-
- ksz8_r_vlan_table(dev, vlan->vid, &data);
- ksz8_from_vlan(dev, data, &fid, &member, &valid);
-
- /* Set up the VLAN entry for the first time. */
- if (!valid) {
- /* Need to find a way to map VID to FID. */
- fid = 1;
- valid = 1;
- }
- member |= BIT(port);
-
- ksz8_to_vlan(dev, fid, member, valid, &data);
- ksz8_w_vlan_table(dev, vlan->vid, data);
-
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- new_pvid = vlan->vid;
-
- if (new_pvid) {
- u16 vid;
-
- ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
- vid &= ~VLAN_VID_MASK;
- vid |= new_pvid;
- ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
-
- ksz8_port_enable_pvid(dev, port, true);
- }
-
- return 0;
-}
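
The PVID update at the end of ksz8_port_vlan_add() keeps the upper bits of REG_PORT_CTRL_VID intact and replaces only the low 12 VID bits. Worked through with an illustrative starting register value:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff     /* low 12 bits carry the VID */

int main(void)
{
        uint16_t reg = 0xa005;   /* illustrative current register content */
        uint16_t new_pvid = 10;

        reg &= ~VLAN_VID_MASK;   /* keep flag/priority bits: 0xa000 */
        reg |= new_pvid;
        printf("0x%04x\n", reg); /* prints 0xa00a */
        return 0;
}
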
-
-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ksz_device *dev = ds->priv;
- u16 data, pvid;
- u8 fid, member, valid;
-
- if (ksz_is_ksz88x3(dev))
- return -ENOTSUPP;
-
- ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
- pvid = pvid & 0xFFF;
-
- ksz8_r_vlan_table(dev, vlan->vid, &data);
- ksz8_from_vlan(dev, data, &fid, &member, &valid);
-
- member &= ~BIT(port);
-
- /* Invalidate the entry if it has no more members. */
- if (!member) {
- fid = 0;
- valid = 0;
- }
-
- ksz8_to_vlan(dev, fid, member, valid, &data);
- ksz8_w_vlan_table(dev, vlan->vid, data);
-
- if (pvid == vlan->vid)
- ksz8_port_enable_pvid(dev, port, false);
-
- return 0;
-}
-
-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
-{
- struct ksz_device *dev = ds->priv;
-
- if (ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
- dev->mirror_rx |= BIT(port);
- } else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
- dev->mirror_tx |= BIT(port);
- }
-
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
- /* configure mirror port */
- if (dev->mirror_rx || dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, true);
-
- return 0;
-}
-
-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
-{
- struct ksz_device *dev = ds->priv;
- u8 data;
-
- if (mirror->ingress) {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
- dev->mirror_rx &= ~BIT(port);
- } else {
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- dev->mirror_tx &= ~BIT(port);
- }
-
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
-
- if (!dev->mirror_rx && !dev->mirror_tx)
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, false);
-}
-
-static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
-{
- struct ksz_port *p = &dev->ports[port];
- u8 data8;
-
- if (!p->interface && dev->compat_interface) {
- dev_warn(dev->dev,
- "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
- "Please update your device tree.\n",
- port);
- p->interface = dev->compat_interface;
- }
-
- /* Configure MII interface for proper network communication. */
- ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
- data8 &= ~PORT_INTERFACE_TYPE;
- data8 &= ~PORT_GMII_1GPS_MODE;
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_INTERFACE_RMII;
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_GMII;
- p->phydev.speed = SPEED_1000;
- break;
- default:
- data8 &= ~PORT_RGMII_ID_IN_ENABLE;
- data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_OUT_ENABLE;
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_RGMII;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
- p->phydev.duplex = 1;
-}
-
-static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
-{
- struct ksz_port *p = &dev->ports[port];
- struct ksz8 *ksz8 = dev->priv;
- const u32 *masks;
- u8 member;
-
- masks = ksz8->masks;
-
- /* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
-
- if (!ksz_is_ksz88x3(dev))
- ksz8795_set_prio_queue(dev, port, 4);
-
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
-
- /* replace priority */
- ksz_port_cfg(dev, port, P_802_1P_CTRL,
- masks[PORT_802_1P_REMAPPING], false);
-
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
-
- if (cpu_port) {
- if (!ksz_is_ksz88x3(dev))
- ksz8795_cpu_interface_select(dev, port);
-
- member = dev->port_mask;
- } else {
- member = dev->host_mask | p->vid_member;
- }
- ksz8_cfg_port_member(dev, port, member);
-}
-
-static void ksz8_config_cpu_port(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
- struct ksz_port *p;
- const u32 *masks;
- u8 remote;
- int i;
-
- masks = ksz8->masks;
-
- /* The switch marks a maximum-size frame carrying the extra tail-tag byte as oversized. */
- ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
- ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
-
- p = &dev->ports[dev->cpu_port];
- p->vid_member = dev->port_mask;
- p->on = 1;
-
- ksz8_port_setup(dev, dev->cpu_port, true);
- dev->member = dev->host_mask;
-
- for (i = 0; i < dev->phy_port_cnt; i++) {
- p = &dev->ports[i];
-
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = BIT(i);
- p->member = dev->port_mask;
- ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
-
- /* Last port may be disabled. */
- if (i == dev->phy_port_cnt)
- break;
- p->on = 1;
- p->phy = 1;
- }
- for (i = 0; i < dev->phy_port_cnt; i++) {
- p = &dev->ports[i];
- if (!p->on)
- continue;
- if (!ksz_is_ksz88x3(dev)) {
- ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
- if (remote & PORT_FIBER_MODE)
- p->fiber = 1;
- }
- if (p->fiber)
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- true);
- else
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- false);
- }
-}
-
-static int ksz8_setup(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int i, ret = 0;
-
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
-
- ret = ksz8_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
-
- ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
-
- /* Enable automatic fast aging when a link change is detected. */
- ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
-
- /* Enable the aggressive back-off algorithm in half-duplex mode. */
- regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
-
- /*
- * Make sure the unicast VLAN boundary is set as default and
- * enable the "no excessive collision drop" mode.
- */
- regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
-
- ksz8_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
-
- ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
-
- ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
-
- if (!ksz_is_ksz88x3(dev))
- ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
-
- /* set broadcast storm protection to a 10% rate */
- regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
- for (i = 0; i < (dev->num_vlans / 4); i++)
- ksz8_r_vlan_entries(dev, i);
-
- /* Set up the STP address for STP operation. */
- memset(&alu, 0, sizeof(alu));
- ether_addr_copy(alu.mac, eth_stp_addr);
- alu.is_static = true;
- alu.is_override = true;
- alu.port_forward = dev->host_mask;
-
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
-}
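
The broadcast storm write in ksz8_setup() is plain percentage arithmetic on constants removed elsewhere in this patch: BROADCAST_STORM_VALUE (9969, roughly 148,800 frames/s over a 67 ms monitoring interval) scaled by BROADCAST_STORM_PROT_RATE percent. Checking the number that ends up in the register:

#include <stdio.h>

#define BROADCAST_STORM_VALUE     9969 /* ~148,800 frames/s * 67 ms */
#define BROADCAST_STORM_PROT_RATE 10   /* percent */

int main(void)
{
        /* frames permitted per 67 ms interval at the 10% setting */
        printf("%d\n",
               (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100);
        /* prints 996 */
        return 0;
}
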
-
-static void ksz8_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct ksz_device *dev = ds->priv;
-
- if (port == dev->cpu_port) {
- if (state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- } else {
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- }
-
- /* Allow all the expected bits */
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- /* Silicon Errata Sheet (DS80000830A):
- * "Port 1 does not respond to received flow control PAUSE frames"
- * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
- * switches.
- */
- if (!ksz_is_ksz88x3(dev) || port)
- phylink_set(mask, Pause);
-
- /* Asym pause is not supported on KSZ8863 and KSZ8873 */
- if (!ksz_is_ksz88x3(dev))
- phylink_set(mask, Asym_Pause);
-
- /* Only 10M and 100M are supported */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
- phy_modes(state->interface), port);
-}
-
-static const struct dsa_switch_ops ksz8_switch_ops = {
- .get_tag_protocol = ksz8_get_tag_protocol,
- .get_phy_flags = ksz8_sw_get_phy_flags,
- .setup = ksz8_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .phylink_validate = ksz8_validate,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz8_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8_port_vlan_filtering,
- .port_vlan_add = ksz8_port_vlan_add,
- .port_vlan_del = ksz8_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8_port_mirror_add,
- .port_mirror_del = ksz8_port_mirror_del,
-};
-
-static u32 ksz8_get_port_addr(int port, int offset)
-{
- return PORT_CTRL_ADDR(port, offset);
-}
-
-static int ksz8_switch_detect(struct ksz_device *dev)
-{
- u8 id1, id2;
- u16 id16;
- int ret;
-
- /* read chip id */
- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
- if (ret)
- return ret;
-
- id1 = id16 >> 8;
- id2 = id16 & SW_CHIP_ID_M;
-
- switch (id1) {
- case KSZ87_FAMILY_ID:
- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
-
- if (id2 == CHIP_ID_95) {
- u8 val;
-
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
- }
- break;
- case KSZ88_FAMILY_ID:
- if (id2 != CHIP_ID_63)
- return -ENODEV;
- break;
- default:
- dev_err(dev->dev, "invalid family id: %d\n", id1);
- return -ENODEV;
- }
- id16 &= ~0xff;
- id16 |= id2;
- dev->chip_id = id16;
-
- return 0;
-}
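
The detect path splits a single 16-bit read at REG_CHIP_ID0 into a family byte and a chip nibble (SW_CHIP_ID_M is 0xF0 per the register header), then rewrites the low byte with the marketing chip number. A worked example, assuming an illustrative raw ID of 0x8792:

#include <stdint.h>
#include <stdio.h>

#define SW_CHIP_ID_M 0xf0
#define CHIP_ID_95   0x90

int main(void)
{
        uint16_t id16 = 0x8792;            /* illustrative raw REG_CHIP_ID0 read */
        uint8_t id1 = id16 >> 8;           /* 0x87: KSZ87 family */
        uint8_t id2 = id16 & SW_CHIP_ID_M; /* 0x90: matches CHIP_ID_95 */

        if (id2 == CHIP_ID_95)
                id2 = 0x95;                /* normalize to the chip number */

        id16 = (id16 & ~0xff) | id2;
        printf("chip_id = 0x%04x\n", id16); /* prints 0x8795 */
        return 0;
}
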
-
-struct ksz_chip_data {
- u16 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
-};
-
-static const struct ksz_chip_data ksz8_switch_chips[] = {
- {
- .chip_id = 0x8795,
- .dev_name = "KSZ8795",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- /*
- * WARNING
- * =======
- * KSZ8794 is similar to KSZ8795, except that its port map
- * contains a gap between the external and CPU ports, i.e.
- * the port map is NOT contiguous. The per-port register
- * map is shifted accordingly: registers at offset 0x40
- * are NOT used on KSZ8794 but ARE used on KSZ8795 for
- * external port 3.
- * external cpu
- * KSZ8794 0,1,2 4
- * KSZ8795 0,1,2,3 4
- * KSZ8765 0,1,2,3 4
- */
- .chip_id = 0x8794,
- .dev_name = "KSZ8794",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8765,
- .dev_name = "KSZ8765",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8830,
- .dev_name = "KSZ8863/KSZ8873",
- .num_vlans = 16,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x4, /* can be configured as cpu port */
- .port_cnt = 3,
- },
-};
-
-static int ksz8_switch_init(struct ksz_device *dev)
-{
- struct ksz8 *ksz8 = dev->priv;
- int i;
-
- dev->ds->ops = &ksz8_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz8_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = fls(chip->cpu_ports);
- dev->cpu_port = fls(chip->cpu_ports) - 1;
- dev->phy_port_cnt = dev->port_cnt - 1;
- dev->cpu_ports = chip->cpu_ports;
- dev->host_mask = chip->cpu_ports;
- dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
- chip->cpu_ports;
- break;
- }
- }
-
- /* no switch found */
- if (!dev->cpu_ports)
- return -ENODEV;
-
- if (ksz_is_ksz88x3(dev)) {
- ksz8->regs = ksz8863_regs;
- ksz8->masks = ksz8863_masks;
- ksz8->shifts = ksz8863_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names);
- dev->mib_names = ksz88xx_mib_names;
- } else {
- ksz8->regs = ksz8795_regs;
- ksz8->masks = ksz8795_masks;
- ksz8->shifts = ksz8795_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names);
- dev->mib_names = ksz87xx_mib_names;
- }
-
- dev->reg_mib_cnt = MIB_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (dev->mib_cnt + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
-
- /* We rely on software untagging on the CPU port, so that we
- * can support both tagged and untagged VLANs
- */
- dev->ds->untag_bridge_pvid = true;
-
- /* VLAN filtering is partly controlled by the global VLAN
- * Enable flag
- */
- dev->ds->vlan_filtering_is_global = true;
-
- return 0;
-}
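
ksz8_switch_init() derives the whole port geometry from the cpu_ports bitmap alone: fls() returns the 1-based index of the highest set bit, so a CPU-port mask of 0x10 yields five total ports with the CPU on port index 4. A standalone check using the compiler builtin in place of the kernel's fls():

#include <stdio.h>

static int fls_(unsigned int x)      /* 1-based highest set bit, like fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int cpu_ports = 0x10;   /* KSZ8795: bit 4 = CPU port */

        printf("port_cnt=%d cpu_port=%d\n",
               fls_(cpu_ports), fls_(cpu_ports) - 1);
        /* prints port_cnt=5 cpu_port=4 */
        return 0;
}
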
-
-static void ksz8_switch_exit(struct ksz_device *dev)
-{
- ksz8_reset_switch(dev);
-}
-
-static const struct ksz_dev_ops ksz8_dev_ops = {
- .get_port_addr = ksz8_get_port_addr,
- .cfg_port_member = ksz8_cfg_port_member,
- .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
- .port_setup = ksz8_port_setup,
- .r_phy = ksz8_r_phy,
- .w_phy = ksz8_w_phy,
- .r_dyn_mac_table = ksz8_r_dyn_mac_table,
- .r_sta_mac_table = ksz8_r_sta_mac_table,
- .w_sta_mac_table = ksz8_w_sta_mac_table,
- .r_mib_cnt = ksz8_r_mib_cnt,
- .r_mib_pkt = ksz8_r_mib_pkt,
- .freeze_mib = ksz8_freeze_mib,
- .port_init_cnt = ksz8_port_init_cnt,
- .shutdown = ksz8_reset_switch,
- .detect = ksz8_switch_detect,
- .init = ksz8_switch_init,
- .exit = ksz8_switch_exit,
-};
-
-int ksz8_switch_register(struct ksz_device *dev)
-{
- return ksz_switch_register(dev, &ksz8_dev_ops);
-}
-EXPORT_SYMBOL(ksz8_switch_register);
-
-MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
deleted file mode 100644
index 866767b70d65..000000000000
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Microchip KSZ8795 series register access through SPI
- *
- * Copyright (C) 2017 Microchip Technology Inc.
- * Tristram Ha <Tristram.Ha@microchip.com>
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz8.h"
-#include "ksz_common.h"
-
-#define KSZ8795_SPI_ADDR_SHIFT 12
-#define KSZ8795_SPI_ADDR_ALIGN 3
-#define KSZ8795_SPI_TURNAROUND_SHIFT 1
-
-#define KSZ8863_SPI_ADDR_SHIFT 8
-#define KSZ8863_SPI_ADDR_ALIGN 8
-#define KSZ8863_SPI_TURNAROUND_SHIFT 0
-
-KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
- KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
-
-KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
- KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
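
These shift constants describe how the SPI command frame is laid out for each chip: on the KSZ8795 a 12-bit register address sits above one turnaround bit, with the command bits on top. A hedged sketch of how a read header could be packed from these constants; the 3-bit read opcode value here is an assumption for illustration, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define SPI_ADDR_SHIFT       12     /* KSZ8795_SPI_ADDR_SHIFT */
#define SPI_TURNAROUND_SHIFT 1      /* KSZ8795_SPI_TURNAROUND_SHIFT */
#define SPI_CMD_READ         0x3u   /* assumed read opcode */

static uint16_t spi_read_header(uint16_t reg)
{
        /* command | 12-bit address | turnaround bit */
        return (SPI_CMD_READ << (SPI_ADDR_SHIFT + SPI_TURNAROUND_SHIFT)) |
               ((reg & 0xfffu) << SPI_TURNAROUND_SHIFT);
}

int main(void)
{
        printf("0x%04x\n", spi_read_header(0x02)); /* prints 0x6004 */
        return 0;
}
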
-
-static int ksz8795_spi_probe(struct spi_device *spi)
-{
- const struct regmap_config *regmap_config;
- struct device *ddev = &spi->dev;
- struct regmap_config rc;
- struct ksz_device *dev;
- struct ksz8 *ksz8;
- int i, ret = 0;
-
- ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = spi;
-
- dev = ksz_switch_alloc(&spi->dev, ksz8);
- if (!dev)
- return -ENOMEM;
-
- regmap_config = device_get_match_data(ddev);
- if (!regmap_config)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- rc = regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz8_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz8795_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz8795_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (!dev)
- return;
-
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
-
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz8795_dt_ids[] = {
- { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config },
- { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
-
-static struct spi_driver ksz8795_spi_driver = {
- .driver = {
- .name = "ksz8795-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz8795_dt_ids),
- },
- .probe = ksz8795_spi_probe,
- .remove = ksz8795_spi_remove,
- .shutdown = ksz8795_spi_shutdown,
-};
-
-module_spi_driver(ksz8795_spi_driver);
-
-MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5883fa7edda2..a8bfcd917bf7 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -5,6 +5,9 @@
* Copyright (C) 2019 Pengutronix, Michael Grzeschik <kernel@pengutronix.de>
*/
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
#include "ksz8.h"
#include "ksz_common.h"
@@ -26,11 +29,9 @@ static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
@@ -55,13 +56,11 @@ static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
- struct ksz8 *ksz8;
int i, ret = 0;
u32 reg;
u8 *val;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
@@ -86,22 +85,16 @@ static const struct regmap_bus regmap_smi[] = {
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
- .max_raw_read = 1,
- .max_raw_write = 1,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
- .max_raw_read = 2,
- .max_raw_write = 2,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
- .max_raw_read = 4,
- .max_raw_write = 4,
}
};
@@ -112,9 +105,9 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
+ .max_register = U8_MAX,
},
{
.name = "#16",
@@ -122,9 +115,9 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 16,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
+ .max_register = U8_MAX,
},
{
.name = "#32",
@@ -132,49 +125,49 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 32,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
+ .max_register = U8_MAX,
}
};
static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
+ struct device *ddev = &mdiodev->dev;
+ const struct ksz_chip_data *chip;
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int ret;
int i;
- ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = mdiodev;
-
- dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(ksz8863_regmap_config); i++) {
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz8863_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init(&mdiodev->dev,
&regmap_smi[i], dev,
&rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&mdiodev->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz8863_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&mdiodev->dev,
+ PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz8863_regmap_config[i].val_bits);
}
}
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -191,8 +184,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
@@ -206,8 +197,14 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
}
static const struct of_device_id ksz8863_dt_ids[] = {
- { .compatible = "microchip,ksz8863" },
- { .compatible = "microchip,ksz8873" },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8_reg.h
index 6b40bc25f7ff..332408567b47 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8_reg.h
@@ -1,41 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Microchip KSZ8795 register definitions
+ * Microchip KSZ8XXX series register definitions
+ *
+ * The base for these definitions is the KSZ8795 but, unless
+ * indicated otherwise by their prefix, they apply to all KSZ8
+ * series devices. Registers and masks that do change are defined in
+ * dedicated structures in ksz_common.c.
*
* Copyright (c) 2017 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
-#ifndef __KSZ8795_REG_H
-#define __KSZ8795_REG_H
+#ifndef __KSZ8_REG_H
+#define __KSZ8_REG_H
#define KS_PORT_M 0x1F
#define KS_PRIO_M 0x3
#define KS_PRIO_S 2
-#define REG_CHIP_ID0 0x00
-
-#define KSZ87_FAMILY_ID 0x87
-#define KSZ88_FAMILY_ID 0x88
-
-#define REG_CHIP_ID1 0x01
-
-#define SW_CHIP_ID_M 0xF0
-#define SW_CHIP_ID_S 4
#define SW_REVISION_M 0x0E
#define SW_REVISION_S 1
-#define SW_START 0x01
-
-#define CHIP_ID_94 0x60
-#define CHIP_ID_95 0x90
-#define CHIP_ID_63 0x30
#define KSZ8863_REG_SW_RESET 0x43
#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4)
#define KSZ8863_PCS_RESET BIT(0)
+#define KSZ88X3_REG_FVID_AND_HOST_MODE 0xC6
+#define KSZ88X3_PORT3_RMII_CLK_INTERNAL BIT(3)
+
#define REG_SW_CTRL_0 0x02
#define SW_NEW_BACKOFF BIT(7)
@@ -57,12 +51,14 @@
#define REG_SW_CTRL_2 0x04
#define UNICAST_VLAN_BOUNDARY BIT(7)
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define KSZ8863_HUGE_PACKET_ENABLE BIT(2)
+#define KSZ8863_LEGAL_PACKET_ENABLE BIT(1)
+
#define REG_SW_CTRL_3 0x05
#define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
@@ -77,13 +73,9 @@
#define SW_FLOW_CTRL BIT(5)
#define SW_10_MBIT BIT(4)
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_CTRL_5 0x07
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_CTRL_6 0x08
#define SW_MIB_COUNTER_FLUSH BIT(7)
@@ -137,7 +129,8 @@
#define PORT_BASED_PRIO_3 3
#define PORT_INSERT_TAG BIT(2)
#define PORT_REMOVE_TAG BIT(1)
-#define PORT_QUEUE_SPLIT_L BIT(0)
+#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0)
+#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0)
#define REG_PORT_1_CTRL_1 0x11
#define REG_PORT_2_CTRL_1 0x21
@@ -156,13 +149,11 @@
#define REG_PORT_4_CTRL_2 0x42
#define REG_PORT_5_CTRL_2 0x52
+#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7)
#define PORT_INGRESS_FILTER BIT(6)
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
@@ -192,15 +183,7 @@
#define REG_PORT_5_CTRL_6 0x56
#define PORT_MII_INTERNAL_CLOCK BIT(7)
-#define PORT_GMII_1GPS_MODE BIT(6)
-#define PORT_RGMII_ID_IN_ENABLE BIT(4)
-#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
#define PORT_GMII_MAC_MODE BIT(2)
-#define PORT_INTERFACE_TYPE 0x3
-#define PORT_INTERFACE_MII 0
-#define PORT_INTERFACE_RMII 1
-#define PORT_INTERFACE_GMII 2
-#define PORT_INTERFACE_RGMII 3
#define REG_PORT_1_CTRL_7 0x17
#define REG_PORT_2_CTRL_7 0x27
@@ -220,8 +203,6 @@
#define REG_PORT_4_STATUS_0 0x48
/* For KSZ8765. */
-#define PORT_FIBER_MODE BIT(7)
-
#define PORT_REMOTE_ASYM_PAUSE BIT(5)
#define PORT_REMOTE_SYM_PAUSE BIT(4)
#define PORT_REMOTE_100BTX_FD BIT(3)
@@ -291,6 +272,7 @@
#define PORT_AUTO_MDIX_DISABLE BIT(2)
#define PORT_FORCE_MDIX BIT(1)
#define PORT_MAC_LOOPBACK BIT(0)
+#define KSZ8873_PORT_PHY_LOOPBACK BIT(0)
#define REG_PORT_1_STATUS_2 0x1E
#define REG_PORT_2_STATUS_2 0x2E
@@ -325,7 +307,6 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
@@ -353,13 +334,6 @@
((addr) + REG_PORT_1_CTRL_0 + (port) * \
(REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
-#define REG_SW_MAC_ADDR_0 0x68
-#define REG_SW_MAC_ADDR_1 0x69
-#define REG_SW_MAC_ADDR_2 0x6A
-#define REG_SW_MAC_ADDR_3 0x6B
-#define REG_SW_MAC_ADDR_4 0x6C
-#define REG_SW_MAC_ADDR_5 0x6D
-
#define TABLE_EXT_SELECT_S 5
#define TABLE_EEE_V 1
#define TABLE_ACL_V 2
@@ -390,8 +364,6 @@
#define REG_IND_DATA_1 0x77
#define REG_IND_DATA_0 0x78
-#define REG_IND_DATA_PME_EEE_ACL 0xA0
-
#define REG_INT_STATUS 0x7C
#define REG_INT_ENABLE 0x7D
@@ -472,20 +444,6 @@
#define TOS_PRIO_M KS_PRIO_M
#define TOS_PRIO_S KS_PRIO_S
-#define REG_SW_CTRL_20 0xA3
-
-#define SW_GMII_DRIVE_STRENGTH_S 4
-#define SW_DRIVE_STRENGTH_M 0x7
-#define SW_DRIVE_STRENGTH_2MA 0
-#define SW_DRIVE_STRENGTH_4MA 1
-#define SW_DRIVE_STRENGTH_8MA 2
-#define SW_DRIVE_STRENGTH_12MA 3
-#define SW_DRIVE_STRENGTH_16MA 4
-#define SW_DRIVE_STRENGTH_20MA 5
-#define SW_DRIVE_STRENGTH_24MA 6
-#define SW_DRIVE_STRENGTH_28MA 7
-#define SW_MII_DRIVE_STRENGTH_S 0
-
#define REG_SW_CTRL_21 0xA4
#define SW_IPV6_MLD_OPTION BIT(3)
@@ -510,10 +468,7 @@
#define REG_PORT_4_CTRL_13 0xE1
#define REG_PORT_5_CTRL_13 0xF1
-#define PORT_QUEUE_SPLIT_H BIT(1)
-#define PORT_QUEUE_SPLIT_1 0
-#define PORT_QUEUE_SPLIT_2 1
-#define PORT_QUEUE_SPLIT_4 2
+#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1)
#define PORT_DROP_TAG BIT(0)
#define REG_PORT_1_CTRL_14 0xB2
@@ -752,8 +707,6 @@
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
-#define KSZ8795_SW_ID 0x8795
-
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
@@ -776,6 +729,55 @@
#define PHY_POWER_SAVING_ENABLE BIT(2)
#define PHY_REMOTE_LOOPBACK BIT(1)
+/* KSZ8463 specific registers. */
+#define P1MBCR 0x4C
+#define P1MBSR 0x4E
+#define PHY1ILR 0x50
+#define PHY1IHR 0x52
+#define P1ANAR 0x54
+#define P1ANLPR 0x56
+#define P2MBCR 0x58
+#define P2MBSR 0x5A
+#define PHY2ILR 0x5C
+#define PHY2IHR 0x5E
+#define P2ANAR 0x60
+#define P2ANLPR 0x62
+
+#define P1CR1 0x6C
+#define P1CR2 0x6E
+#define P1CR3 0x72
+#define P1CR4 0x7E
+#define P1SR 0x80
+
+#define KSZ8463_FLUSH_TABLE_CTRL 0xAD
+
+#define KSZ8463_FLUSH_DYN_MAC_TABLE BIT(2)
+#define KSZ8463_FLUSH_STA_MAC_TABLE BIT(1)
+
+#define KSZ8463_REG_SW_CTRL_9 0xAE
+
+#define KSZ8463_REG_CFG_CTRL 0xD8
+
+#define PORT_2_COPPER_MODE BIT(7)
+#define PORT_1_COPPER_MODE BIT(6)
+#define PORT_COPPER_MODE_S 6
+
+#define KSZ8463_REG_SW_RESET 0x126
+
+#define KSZ8463_GLOBAL_SOFTWARE_RESET BIT(0)
+
+#define KSZ8463_PTP_CLK_CTRL 0x600
+
+#define PTP_CLK_ENABLE BIT(1)
+
+#define KSZ8463_PTP_MSG_CONF1 0x620
+
+#define PTP_ENABLE BIT(6)
+
+#define KSZ8463_REG_DSP_CTRL_6 0x734
+
+#define COPPER_RECEIVE_ADJUSTMENT BIT(13)
+
/* Chip resource */
#define PRIO_QUEUES 4
@@ -791,7 +793,6 @@
#define P_TAG_CTRL REG_PORT_CTRL_0
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
-#define P_STP_CTRL REG_PORT_CTRL_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -812,11 +813,9 @@
#define IND_ACC_TABLE(table) ((table) << 8)
-/* The driver sets switch broadcast storm protection at a 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* ~148,800 frames/s * 67 ms interval = 9969 frames */
-#define BROADCAST_STORM_VALUE 9969
+/* Indirect EEE Global Register 2 offsets */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
@@ -834,7 +833,9 @@
#define KSZ8795_MIB_TOTAL_TX_1 0x105
#define KSZ8863_MIB_PACKET_DROPPED_TX_0 0x100
-#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x105
+#define KSZ8863_MIB_PACKET_DROPPED_RX_0 0x103
+
+#define KSZ8895_MIB_PACKET_DROPPED_RX_0 0x105
#define MIB_PACKET_DROPPED 0x0000FFFF
@@ -844,5 +845,6 @@
#define TAIL_TAG_LOOKUP BIT(7)
#define FID_ENTRIES 128
+#define KSZ8_DYN_MAC_ENTRIES 1024
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 854e25f43fa7..5facffbb9c9a 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -11,88 +11,56 @@
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz9477_reg.h"
#include "ksz_common.h"
-
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define NEW_XMII BIT(1)
-#define IS_9893 BIT(2)
-
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
+#include "ksz9477.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+ regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
{
- regmap_update_bits(dev->regmap[2], addr, bits, set ? bits : 0);
+ regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
}
static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
u32 bits, bool set)
{
- regmap_update_bits(dev->regmap[2], PORT_CTRL_ADDR(port, offset),
+ regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
bits, set ? bits : 0);
}
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size;
+
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
+ REG_SW_MTU_MASK, frame_size);
+}
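
The MTU-to-frame-size conversion in ksz9477_change_mtu() is straightforward: the on-wire frame adds the VLAN-tagged Ethernet header and the FCS on top of the MTU. For the default MTU of 1500 that gives the classic 1522-byte limit:

#include <stdio.h>

#define VLAN_ETH_HLEN 18  /* MACs + VLAN tag + ethertype */
#define ETH_FCS_LEN    4  /* frame check sequence */

int main(void)
{
        int mtu = 1500;

        /* largest on-wire frame the switch must pass at this MTU */
        printf("%d\n", mtu + VLAN_ETH_HLEN + ETH_FCS_LEN); /* prints 1522 */
        return 0;
}
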
+
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
- return regmap_read_poll_timeout(dev->regmap[0], REG_SW_VLAN_CTRL,
+ return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
val, !(val & VLAN_START), 10, 1000);
}
@@ -179,7 +147,7 @@ static int ksz9477_wait_alu_ready(struct ksz_device *dev)
{
unsigned int val;
- return regmap_read_poll_timeout(dev->regmap[2], REG_SW_ALU_CTRL__4,
+ return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
val, !(val & ALU_START), 10, 1000);
}
@@ -187,13 +155,197 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
{
unsigned int val;
- return regmap_read_poll_timeout(dev->regmap[2],
+ return regmap_read_poll_timeout(ksz_regmap_32(dev),
REG_SW_ALU_STAT_CTRL__4,
val, !(val & ALU_STAT_START),
10, 1000);
}
-static int ksz9477_reset_switch(struct ksz_device *dev)
+static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
+{
+ u32 data;
+
+ data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16;
+ data |= reg;
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data);
+}
+
+static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 *buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
+}
+
+static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
+}
+
+static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+ u16 val;
+
+ port_sgmii_r(dev, port, mmd, reg, &val);
+
+ /* Simulate a value to activate special code in the XPCS driver if
+ * supported.
+ */
+ if (mmd == MDIO_MMD_PMAPMD) {
+ if (reg == MDIO_DEVID1)
+ val = 0x9477;
+ else if (reg == MDIO_DEVID2)
+ val = 0x22 << 10;
+ } else if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ /* The MII_BMCR register needs to be updated with the exact
+ * speed and duplex mode when running in SGMII mode, as this
+ * register is used to detect the connected speed in that mode.
+ */
+ if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
+ int duplex, speed;
+
+ if (val & SR_MII_STAT_LINK_UP) {
+ speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
+ if (speed == SR_MII_STAT_1000_MBPS)
+ speed = SPEED_1000;
+ else if (speed == SR_MII_STAT_100_MBPS)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (val & SR_MII_STAT_FULL_DUPLEX)
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ if (!p->phydev.link ||
+ p->phydev.speed != speed ||
+ p->phydev.duplex != duplex) {
+ u16 ctrl;
+
+ p->phydev.link = 1;
+ p->phydev.speed = speed;
+ p->phydev.duplex = duplex;
+ port_sgmii_r(dev, port, mmd, MII_BMCR,
+ &ctrl);
+ ctrl &= BMCR_ANENABLE;
+ ctrl |= mii_bmcr_encode_fixed(speed,
+ duplex);
+ port_sgmii_w(dev, port, mmd, MII_BMCR,
+ ctrl);
+ }
+ } else {
+ p->phydev.link = 0;
+ }
+ } else if (reg == MII_BMSR) {
+ p->phydev.link = !!(val & BMSR_LSTATUS);
+ }
+ }
+
+ return val;
+}
+
+static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+
+ if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
+ u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;
+
+ /* Need these bits for 1000BASE-X mode to work with
+ * AN on.
+ */
+ if (!(val & sgmii_mode))
+ val |= SR_MII_SGMII_LINK_UP |
+ SR_MII_TX_CFG_PHY_MASTER;
+
+ /* The SGMII interrupt in the port cannot be masked, so
+ * make sure the interrupt is not enabled, as it is not
+ * handled.
+ */
+ val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
+ } else if (reg == MII_BMCR) {
+ /* The MII_ADVERTISE register needs to be written once
+ * before auto-negotiation so that the correct
+ * config_word is sent out after reset.
+ */
+ if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
+ u16 adv;
+
+ /* The SGMII port cannot disable flow control
+ * so it is better to just advertise symmetric
+ * pause.
+ */
+ port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
+ &adv);
+ adv |= ADVERTISE_1000XPAUSE;
+ adv &= ~ADVERTISE_1000XPSE_ASYM;
+ port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
+ adv);
+ p->sgmii_adv_write = 1;
+ } else if (val & BMCR_RESET) {
+ p->sgmii_adv_write = 0;
+ }
+ } else if (reg == MII_ADVERTISE) {
+ /* XPCS driver writes to this register so there is no
+ * need to update it for the errata.
+ */
+ p->sgmii_adv_write = 1;
+ }
+ }
+ port_sgmii_w(dev, port, mmd, reg, val);
+
+ return 0;
+}
+
+int ksz9477_pcs_create(struct ksz_device *dev)
+{
+ /* This chip has an SGMII port. */
+ if (ksz_has_sgmii_port(dev)) {
+ int port = ksz_get_sgmii_port(dev);
+ struct ksz_port *p = &dev->ports[port];
+ struct phylink_pcs *pcs;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ksz_pcs_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(dev->dev));
+ bus->read_c45 = &ksz9477_pcs_read;
+ bus->write_c45 = &ksz9477_pcs_write;
+ bus->parent = dev->dev;
+ bus->phy_mask = ~0;
+ bus->priv = dev;
+
+ ret = devm_mdiobus_register(dev->dev, bus);
+ if (ret)
+ return ret;
+
+ pcs = xpcs_create_pcs_mdiodev(bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ p->pcs = pcs;
+ }
+
+ return 0;
+}
+
+int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
@@ -202,35 +354,35 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
/* turn off SPI DO Edge select */
- regmap_update_bits(dev->regmap[0], REG_SW_GLOBAL_SERIAL_CTRL_0,
+ regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
+ return 0;
- if (dev->synclko_125)
- ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
-static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
@@ -243,7 +395,7 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
data |= (addr << MIB_COUNTER_INDEX_S);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
- ret = regmap_read_poll_timeout(dev->regmap[2],
+ ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
val, !(val & MIB_COUNTER_READ), 10, 1000);
/* failed to read MIB. get out of loop */
@@ -257,14 +409,14 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
*cnt += data;
}
-static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
- addr = ksz9477_mib_names[addr].index;
+ addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
-static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
@@ -278,7 +430,74 @@ static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 lue_ctrl;
+ u32 pmavbc;
+ u16 pqm;
+ int ret;
+
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states. If you see this message, please read the
+ * errata-sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
+ if (ret)
+ return ret;
+ if (lue_ctrl & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+
+ return ret;
+}
+
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+
+ if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status)
+ == PORT_INTF_SPEED_NONE) &&
+ !(status & PORT_INTF_FULL_DUPLEX)) {
+ ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
+ }
+
+ return ret;
+}
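
The errata monitor above leans on FIELD_GET() to pull contiguous bit fields such as PMAVBC_MASK out of raw register words. Its effect is to mask and then shift the field down by the position of its lowest bit; a sketch with an illustrative mask, since the real mask values live in the register header:

#include <stdint.h>
#include <stdio.h>

/* Same effect as the kernel macro for a contiguous mask: mask, then
 * divide by the mask's lowest set bit to shift the field down.
 */
#define FIELD_GET_(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

int main(void)
{
        uint32_t reg = 0x00012345;
        uint32_t mask = 0x000fff00;  /* illustrative 12-bit field */

        printf("0x%03x\n", (unsigned)FIELD_GET_(mask, reg)); /* prints 0x123 */
        return 0;
}
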
+
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -289,27 +508,22 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
mutex_unlock(&mib->cnt_mutex);
-
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
{
- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
- struct ksz_device *dev = ds->priv;
-
- if (dev->features & IS_9893)
- proto = DSA_TAG_PROTO_KSZ9893;
- return proto;
+ /* The KSZ8563R does not have extended registers, but the
+ * BMSR_ESTATEN and BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}
-static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be set up in the device tree, but this function is
@@ -317,7 +531,7 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -350,154 +564,74 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
+
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
}
- return val;
+ *data = val;
+
+ return 0;
}
-static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
- u16 val)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- struct ksz_device *dev = ds->priv;
+ u32 mask, val32;
/* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return 0;
-
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
+ if (!dev->info->internal_phy[addr])
return 0;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
-
- return 0;
-}
-static void ksz9477_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- int i;
+ if (reg < 0x10)
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
- ETH_GSTRING_LEN);
+ /* Errata: When using SPI, I2C, or in-band register access,
+ * writes to certain PHY registers should be performed as
+ * 32-bit writes instead of 16-bit writes.
+ */
+ val32 = val;
+ mask = 0xffff;
+ if ((reg & 1) == 0) {
+ val32 <<= 16;
+ mask <<= 16;
}
+ reg &= ~1;
+ return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
}
-static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
- dev->ports[port].member = member;
}
-static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
u8 data;
- int member = -1;
- int forward = dev->member;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port)
- member = 0;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- member = dev->host_mask | p->vid_member;
- mutex_lock(&dev->dev_mutex);
-
- /* Port is a member of a bridge. */
- if (dev->br_member & (1 << port)) {
- dev->member |= (1 << port);
- member = dev->member;
- }
- mutex_unlock(&dev->dev_mutex);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
- mutex_lock(&dev->dev_mutex);
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz9477_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & (1 << port))
- dev->member &= ~(1 << port);
- }
-
- /* When the topology has changed, ksz_update_port_member
- * should be called to modify the port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
- mutex_unlock(&dev->dev_mutex);
-}
-static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
-{
- u8 data;
-
- regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
+ regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- if (port < dev->port_cnt) {
+ if (port < dev->info->port_cnt) {
/* flush individual port */
- ksz_pread8(dev, port, P_STP_CTRL, &data);
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, port, P_STP_CTRL,
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -511,11 +645,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
@@ -548,10 +681,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
@@ -582,10 +714,9 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -639,10 +770,9 @@ exit:
return ret;
}
-static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -675,10 +805,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
/* clear forwarding port */
- alu_table[2] &= ~BIT(port);
+ alu_table[1] &= ~BIT(port);
/* if there is no port to forward, clear table */
- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;
@@ -729,10 +859,9 @@ static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
alu->mac[5] = alu_table[3] & 0xFF;
}
-static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
@@ -759,6 +888,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
@@ -781,26 +913,30 @@ exit:
return ret;
}
-static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -828,7 +964,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics) {
+ if (index == dev->info->num_statics) {
err = -ENOSPC;
goto exit;
}
@@ -844,7 +980,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -856,26 +992,30 @@ exit:
return err;
}
-static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -901,7 +1041,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->info->num_statics)
goto exit;
/* clear port */
@@ -917,7 +1057,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -931,19 +1071,36 @@ exit:
return ret;
}
-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
+ u8 data;
+ int p;
+
+ /* Limit to one sniffer port.
+ * Check whether any of the ports is already set up for sniffing.
+ * If so, instruct the user to remove the previous entry and exit.
+ */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ /* Skip the current sniffing port */
+ if (p == mirror->to_local_port)
+ continue;
+
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if (data & PORT_MIRROR_SNIFFER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+ }
if (ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
/* configure mirror port */
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
@@ -953,231 +1110,148 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
+ bool in_use = false;
u8 data;
+ int p;
if (mirror->ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+ /* Check whether any of the ports is still referring to the sniffer port */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
+ in_use = true;
+ break;
+ }
+ }
+
+ /* delete sniffing if there are no other mirroring rules */
+ if (!in_use)
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, false);
}
-static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
+static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
+ phy_interface_t interface;
bool gbit;
- if (dev->features & NEW_XMII)
- gbit = !(data & PORT_MII_NOT_1GBIT);
- else
- gbit = !!(data & PORT_MII_1000MBIT_S1);
- return gbit;
-}
+ if (dev->info->internal_phy[port])
+ return PHY_INTERFACE_MODE_NA;
-static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
-{
- if (dev->features & NEW_XMII) {
- if (gbit)
- *data &= ~PORT_MII_NOT_1GBIT;
- else
- *data |= PORT_MII_NOT_1GBIT;
- } else {
- if (gbit)
- *data |= PORT_MII_1000MBIT_S1;
- else
- *data &= ~PORT_MII_1000MBIT_S1;
- }
-}
+ gbit = ksz_get_gbit(dev, port);
-static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
-{
- int mode;
+ interface = ksz_get_xmii(dev, port, gbit);
- if (dev->features & NEW_XMII) {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL:
- mode = 0;
- break;
- case PORT_RMII_SEL:
- mode = 1;
- break;
- case PORT_GMII_SEL:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- } else {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL_S1:
- mode = 0;
- break;
- case PORT_RMII_SEL_S1:
- mode = 1;
- break;
- case PORT_GMII_SEL_S1:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- }
- return mode;
+ return interface;
}
-static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- u8 xmii;
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
- if (dev->features & NEW_XMII) {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL;
- break;
- case 1:
- xmii = PORT_RMII_SEL;
- break;
- case 2:
- xmii = PORT_GMII_SEL;
- break;
- default:
- xmii = PORT_RGMII_SEL;
- break;
- }
- } else {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL_S1;
- break;
- case 1:
- xmii = PORT_RMII_SEL_S1;
- break;
- case 2:
- xmii = PORT_GMII_SEL_S1;
- break;
- default:
- xmii = PORT_RGMII_SEL_S1;
- break;
- }
+ if (dev->info->gbit_capable[port])
+ config->mac_capabilities |= MAC_1000FD;
+
+ if (ksz_is_sgmii_port(dev, port)) {
+ struct ksz_port *p = &dev->ports[port];
+
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ p->pcs->supported_interfaces);
}
- *data &= ~PORT_MII_SEL_M;
- *data |= xmii;
}
-static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
- phy_interface_t interface;
- bool gbit;
- int mode;
- u8 data8;
+ u32 secs = msecs / 1000;
+ u8 data, mult, value;
+ u32 max_val;
+ int ret;
- if (port < dev->phy_port_cnt)
- return PHY_INTERFACE_MODE_NA;
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- gbit = ksz9477_get_gbit(dev, data8);
- mode = ksz9477_get_xmii(dev, data8);
- switch (mode) {
- case 2:
- interface = PHY_INTERFACE_MODE_GMII;
- if (gbit)
- break;
- fallthrough;
- case 0:
- interface = PHY_INTERFACE_MODE_MII;
- break;
- case 1:
- interface = PHY_INTERFACE_MODE_RMII;
- break;
- default:
- interface = PHY_INTERFACE_MODE_RGMII;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_TXID;
- if (data8 & PORT_RGMII_ID_IG_ENABLE) {
- interface = PHY_INTERFACE_MODE_RGMII_RXID;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_ID;
- }
- break;
+#define MAX_TIMER_VAL ((1 << 8) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and an 8-bit seconds
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 255 = 1785 seconds.
+ */
+ if (!secs)
+ secs = 1;
+
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether the multiplier needs to be updated. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register; e.g. the
+ * hardware default expresses 300 seconds as multiplier 4 with a
+ * 75-second timer value.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
}
- return interface;
-}
-static void ksz9477_port_mmd_write(struct ksz_device *dev, int port,
- u8 dev_addr, u16 reg_addr, u16 val)
-{
- ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
- MMD_SETUP(PORT_MMD_OP_INDEX, dev_addr));
- ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, reg_addr);
- ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_SETUP,
- MMD_SETUP(PORT_MMD_OP_DATA_NO_INCR, dev_addr));
- ksz_pwrite16(dev, port, REG_PORT_PHY_MMD_INDEX_DATA, val);
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ if (ret < 0)
+ return ret;
+ }
+
+ value = DIV_ROUND_UP(secs, data);
+ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
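+
+/* Worked example (illustrative, editor's note): a request of 1000000 ms
+ * gives secs = 1000. Assuming the multiplier in the register cannot be
+ * reused, max_val stays 255, so the multiplier becomes
+ * DIV_ROUND_UP(1000, 255) = 4 and the timer value DIV_ROUND_UP(1000, 4)
+ * = 250, i.e. an exact ageing time of 4 * 250 = 1000 seconds.
+ */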
-static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
+void ksz9477_port_queue_split(struct ksz_device *dev, int port)
{
- /* Apply PHY settings to address errata listed in
- * KSZ9477, KSZ9897, KSZ9896, KSZ9567, KSZ8565
- * Silicon Errata and Data Sheet Clarification documents:
- *
- * Register settings are needed to improve PHY receive performance
- */
- ksz9477_port_mmd_write(dev, port, 0x01, 0x6f, 0xdd0b);
- ksz9477_port_mmd_write(dev, port, 0x01, 0x8f, 0x6032);
- ksz9477_port_mmd_write(dev, port, 0x01, 0x9d, 0x248c);
- ksz9477_port_mmd_write(dev, port, 0x01, 0x75, 0x0060);
- ksz9477_port_mmd_write(dev, port, 0x01, 0xd3, 0x7777);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x06, 0x3008);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x08, 0x2001);
-
- /* Transmit waveform amplitude can be improved
- * (1000BASE-T, 100BASE-TX, 10BASE-Te)
- */
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x04, 0x00d0);
+ u8 data;
- /* Energy Efficient Ethernet (EEE) feature select must
- * be manually disabled (except on KSZ8565 which is 100Mbit)
- */
- if (dev->features & GBIT_SUPPORT)
- ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
+ if (dev->info->num_tx_queues == 8)
+ data = PORT_EIGHT_QUEUE;
+ else if (dev->info->num_tx_queues == 4)
+ data = PORT_FOUR_QUEUE;
+ else if (dev->info->num_tx_queues == 2)
+ data = PORT_TWO_QUEUE;
+ else
+ data = PORT_SINGLE_QUEUE;
- /* Register settings are required to meet data sheet
- * supply current specifications
- */
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x13, 0x6eff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x14, 0xe6ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x15, 0x6eff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x16, 0xe6ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x17, 0x00ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x18, 0x43ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x19, 0xc3ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x1a, 0x6fff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x1b, 0x07ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x1c, 0x0fff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x1d, 0xe7ff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x1e, 0xefff);
- ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
+ ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
}
-static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- u8 data8;
- u8 member;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
u16 data16;
- struct ksz_port *p = &dev->ports[port];
+ u8 member;
/* enable tag tail for host port */
if (cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
true);
+ ksz9477_port_queue_split(dev, port);
+
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
/* set back pressure */
@@ -1186,98 +1260,53 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
/* replace priority */
ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
false);
ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
MTI_PVID_REPLACE, false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+ /* force flow control for non-PHY ports only */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ !dev->info->internal_phy[port]);
- if (port < dev->phy_port_cnt) {
- /* do not force flow control */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
- PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
- false);
-
- if (dev->phy_errata_9477)
- ksz9477_phy_errata_setup(dev, port);
- } else {
- /* force flow control */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
- PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
- true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- ksz9477_set_xmii(dev, 0, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- ksz9477_set_xmii(dev, 1, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- ksz9477_set_xmii(dev, 2, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- p->phydev.speed = SPEED_1000;
- break;
- default:
- ksz9477_set_xmii(dev, 3, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- data8 &= ~PORT_RGMII_ID_IG_ENABLE;
- data8 &= ~PORT_RGMII_ID_EG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- /* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
- data8 &= ~PORT_MII_MAC_MODE;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
- p->phydev.duplex = 1;
- }
- mutex_lock(&dev->dev_mutex);
if (cpu_port)
- member = dev->port_mask;
+ member = dsa_user_ports(ds);
else
- member = dev->host_mask | p->vid_member;
- mutex_unlock(&dev->dev_mutex);
+ member = BIT(dsa_upstream_port(ds, port));
+
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+
+ ksz9477_port_acl_init(dev, port);
+
+ /* clear pending wake flags */
+ ksz_handle_wake_reason(dev, port);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ */
+ ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
-static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) &&
+ (dev->info->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
dev->cpu_port = i;
- dev->host_mask = (1 << dev->cpu_port);
- dev->port_mask |= dev->host_mask;
p = &dev->ports[i];
/* Read from XMII register to determine host port
@@ -1312,51 +1341,130 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->vid_member = dev->port_mask;
- p->on = 1;
}
}
- dev->member = dev->host_mask;
-
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = (1 << i);
- p->member = dev->port_mask;
- ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
-
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
+ /* Power down the internal PHY if port is unused. */
+ if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i])
+ ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN);
+ }
+}
+
+#define RESV_MCAST_CNT 8
+
+static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
+
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
+{
+ u8 i, ports, update;
+ const u32 *masks;
+ bool override;
+ u32 data;
+ int ret;
+
+ masks = dev->info->masks;
+
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* The reserved multicast address table has 8 entries. Each entry has
+ * a default value specifying which ports to forward to. It is assumed
+ * that the host port is the last port in most switches, but that is
+ * not the case for the KSZ9477 and maybe the KSZ9897. For the LAN937X
+ * family the default port is port 5, the first RGMII port. That is
+ * fine for the LAN9370, a 5-port switch, but may not be correct for
+ * the other 8-port versions. It is therefore necessary to update the
+ * whole table to forward to the right ports.
+ * Furthermore, PTP messages can use a reserved multicast address, and
+ * the host will not receive them if this table is not correct.
+ */
+ for (i = 0; i < RESV_MCAST_CNT; i++) {
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_READ];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
+ if (ret < 0)
+ return ret;
+
+ override = false;
+ ports = data & dev->port_mask;
+ switch (i) {
+ case 0:
+ case 6:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ override = true;
+ break;
+ case 2:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ break;
+ case 4:
+ case 5:
+ case 7:
+ /* Skip the host port. */
+ update = dev->port_mask & ~BIT(dev->cpu_port);
+ break;
+ default:
+ update = ports;
+ break;
+ }
+ if (update != ports || override) {
+ data &= ~dev->port_mask;
+ data |= update;
+ /* Set Override bit to receive frame even when port is
+ * closed.
+ */
+ if (override)
+ data |= ALU_V_OVERRIDE;
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
+ if (ret < 0)
+ return ret;
+
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_WRITE];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
}
}
+
+ return 0;
}
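+
+/* Worked example (illustrative, editor's note): on a 7-port switch with
+ * port_mask = 0x7f and the CPU on (hypothetical) port 5, entries 0 and 6
+ * are rewritten to forward only to BIT(5) = 0x20 with the override bit
+ * set, while entries 4, 5 and 7 get 0x7f & ~BIT(5) = 0x5f, i.e. all
+ * ports except the host.
+ */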
-static int ksz9477_setup(struct dsa_switch *ds)
+int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
int ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
-
- ret = ksz9477_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+ ds->mtu_enforcement_ingress = true;
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
@@ -1365,12 +1473,18 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* Do not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
- ksz9477_config_cpu_port(ds);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
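+ /* Illustrative note (editor's addition): VLAN_ETH_FRAME_LEN +
+ * ETH_FCS_LEN is 1518 + 4 = 1522 bytes, the maximum VLAN-tagged
+ * frame size including the FCS.
+ */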
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
@@ -1378,262 +1492,126 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME event changes before shutdown.
+ */
+ return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
-static const struct dsa_switch_ops ksz9477_switch_ops = {
- .get_tag_protocol = ksz9477_get_tag_protocol,
- .setup = ksz9477_setup,
- .phy_read = ksz9477_phy_read16,
- .phy_write = ksz9477_phy_write16,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz9477_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz9477_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_add = ksz9477_port_vlan_add,
- .port_vlan_del = ksz9477_port_vlan_del,
- .port_fdb_dump = ksz9477_port_fdb_dump,
- .port_fdb_add = ksz9477_port_fdb_add,
- .port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_add = ksz9477_port_mdb_add,
- .port_mdb_del = ksz9477_port_mdb_del,
- .port_mirror_add = ksz9477_port_mirror_add,
- .port_mirror_del = ksz9477_port_mirror_del,
-};
-
-static u32 ksz9477_get_port_addr(int port, int offset)
+u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz9477_switch_detect(struct ksz_device *dev)
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
{
- u8 data8;
- u8 id_hi;
- u8 id_lo;
- u32 id32;
- int ret;
+ val = val >> 8;
- /* turn off SPI DO Edge select */
- ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- if (ret)
- return ret;
-
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
- if (ret)
- return ret;
-
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
+ return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
+/* The KSZ9477 provides the following HW features to accelerate
+ * HSR frame handling:
+ *
+ * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
+ * 2. RX PACKET DUPLICATION DISCARDING
+ * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
+ *
+ * Only the feature from point 1 has a NETIF_F* flag available.
+ *
+ * The features from points 2 and 3 are "best effort" - i.e. they work
+ * correctly most of the time, but some frames may not be caught. More
+ * specifically, there is a race condition in hardware such that, when
+ * duplicate packets are received on member ports very close in time to
+ * each other, the hardware fails to detect that they are duplicates.
+ *
+ * Hence, the SW needs to handle those special cases. Still, the speed-up
+ * is considerable when the above features are used.
+ *
+ * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
+ * can be forwarded in the switch fabric between HSR ports.
+ */
+#define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
+void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct net_device *user;
+ struct dsa_port *hsr_dp;
+ u8 data, hsr_ports = 0;
- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
- id_hi = (u8)(id32 >> 16);
- id_lo = (u8)(id32 >> 8);
- if ((id_lo & 0xf) == 3) {
- /* Chip is from KSZ9893 design. */
- dev_info(dev->dev, "Found KSZ9893\n");
- dev->features |= IS_9893;
+ /* Program which port(s) shall support HSR */
+ ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
- /* Chip uses new XMII register definitions. */
- dev->features |= NEW_XMII;
+ /* Forward frames between HSR ports (i.e. bridge together HSR ports) */
+ if (dev->hsr_ports) {
+ dsa_hsr_foreach_port(hsr_dp, ds, hsr)
+ hsr_ports |= BIT(hsr_dp->index);
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
+ hsr_ports |= BIT(dsa_upstream_port(ds, port));
+ dsa_hsr_foreach_port(hsr_dp, ds, hsr)
+ ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
}
- /* Change chip id to known ones so it can be matched against them. */
- id32 = (id_hi << 16) | (id_lo << 8);
+ if (!dev->hsr_ports) {
+ /* Enable discarding of received HSR frames */
+ ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
+ data |= HSR_DUPLICATE_DISCARD;
+ data &= ~HSR_NODE_UNICAST;
+ ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
+ }
- dev->chip_id = id32;
+ /* Enable per port self-address filtering.
+ * The global self-address filtering has already been enabled in the
+ * ksz9477_reset_switch() function.
+ */
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
- return 0;
+ /* Set up HW-supported features for the LAN HSR ports */
+ user = dsa_to_port(ds, port)->user;
+ user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
}
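+
+/* Worked example (illustrative, editor's note): an HSR ring on user ports
+ * 0 and 1 with the upstream (CPU) port at index 5 programs each ring
+ * port's membership to BIT(0) | BIT(1) | BIT(5) = 0x23, bridging the two
+ * ring ports and the host together.
+ */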
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
- bool phy_errata_9477;
-};
-
-static const struct ksz_chip_data ksz9477_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989300,
- .dev_name = "KSZ9893",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x07, /* can be configured as cpu port */
- .port_cnt = 3, /* total port count */
- },
- {
- .chip_id = 0x00956700,
- .dev_name = "KSZ9567",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
-};
-
-static int ksz9477_switch_init(struct ksz_device *dev)
+void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
{
- int i;
-
- dev->ds->ops = &ksz9477_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
- dev->phy_errata_9477 = chip->phy_errata_9477;
-
- break;
- }
- }
+ struct ksz_device *dev = ds->priv;
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- dev->port_mask = (1 << dev->port_cnt) - 1;
-
- dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
- dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (TOTAL_SWITCH_COUNTER_NUM + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
+ /* Clear port HSR support */
+ ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
+ /* Disable forwarding frames between HSR ports */
+ ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));
- return 0;
+ /* Disable per port self-address filtering */
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
}
-static void ksz9477_switch_exit(struct ksz_device *dev)
+int ksz9477_switch_init(struct ksz_device *dev)
{
- ksz9477_reset_switch(dev);
-}
+ u8 data8;
+ int ret;
-static const struct ksz_dev_ops ksz9477_dev_ops = {
- .get_port_addr = ksz9477_get_port_addr,
- .cfg_port_member = ksz9477_cfg_port_member,
- .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .port_setup = ksz9477_port_setup,
- .r_mib_cnt = ksz9477_r_mib_cnt,
- .r_mib_pkt = ksz9477_r_mib_pkt,
- .freeze_mib = ksz9477_freeze_mib,
- .port_init_cnt = ksz9477_port_init_cnt,
- .shutdown = ksz9477_reset_switch,
- .detect = ksz9477_switch_detect,
- .init = ksz9477_switch_init,
- .exit = ksz9477_switch_exit,
-};
-
-int ksz9477_switch_register(struct ksz_device *dev)
-{
- int ret, i;
- struct phy_device *phydev;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
- ret = ksz_switch_register(dev, &ksz9477_dev_ops);
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
return ret;
- for (i = 0; i < dev->phy_port_cnt; ++i) {
- if (!dsa_is_user_port(dev->ds, i))
- continue;
-
- phydev = dsa_to_port(dev->ds, i)->slave->phydev;
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ if (ret)
+ return ret;
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ return 0;
+}
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
- }
- return ret;
+void ksz9477_switch_exit(struct ksz_device *dev)
+{
+ ksz9477_reset_switch(dev);
}
-EXPORT_SYMBOL(ksz9477_switch_register);
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000000..0d1a6dfda23e
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series header file
+ *
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+void ksz9477_port_queue_split(struct ksz_device *dev, int port);
+void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
+void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
+
+int ksz9477_port_acl_init(struct ksz_device *dev, int port);
+void ksz9477_port_acl_free(struct ksz_device *dev, int port);
+int ksz9477_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+
+#define KSZ9477_ACL_ENTRY_SIZE 18
+#define KSZ9477_ACL_MAX_ENTRIES 16
+
+struct ksz9477_acl_entry {
+ u8 entry[KSZ9477_ACL_ENTRY_SIZE];
+ unsigned long cookie;
+ u32 prio;
+};
+
+struct ksz9477_acl_entries {
+ struct ksz9477_acl_entry entries[KSZ9477_ACL_MAX_ENTRIES];
+ int entries_count;
+};
+
+struct ksz9477_acl_priv {
+ struct ksz9477_acl_entries acles;
+};
+
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+ struct ksz9477_acl_entries *acles,
+ unsigned long cookie);
+int ksz9477_acl_write_list(struct ksz_device *dev, int port);
+int ksz9477_sort_acl_entries(struct ksz_device *dev, int port);
+void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val);
+void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx);
+void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
+ u16 ethtype, u8 *src_mac, u8 *dst_mac,
+ unsigned long cookie, u32 prio);
+
+int ksz9477_pcs_create(struct ksz_device *dev);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_acl.c b/drivers/net/dsa/microchip/ksz9477_acl.c
new file mode 100644
index 000000000000..7ba778df63ac
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_acl.c
@@ -0,0 +1,1436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+/* Access Control List (ACL) structure:
+ *
+ * There are multiple groups of registers involved in ACL configuration:
+ *
+ * - Matching Rules: These registers define the criteria for matching incoming
+ * packets based on their header information (Layer 2 MAC, Layer 3 IP, or
+ * Layer 4 TCP/UDP). Different register settings are used depending on the
+ * matching rule mode (MD) and the Enable (ENB) settings.
+ *
+ * - Action Rules: These registers define how the ACL should modify the packet's
+ * priority, VLAN tag priority, and forwarding map once a matching rule has
+ * been triggered. The settings vary depending on whether the matching rule is
+ * in Count Mode (MD = 01 and ENB = 00) or not.
+ *
+ * - Processing Rules: These registers control the overall behavior of the ACL,
+ * such as selecting which matching rule to apply first, enabling/disabling
+ * specific rules, or specifying actions for matched packets.
+ *
+ * ACL Structure:
+ *                             +----------------------+
+ * +----------------------+    | (optional)           |
+ * | Matching Rules       |    | Matching Rules       |
+ * | (Layer 2, 3, 4)      |    | (Layer 2, 3, 4)      |
+ * +----------------------+    +----------------------+
+ *            |                           |
+ *            \___________________________/
+ *                          |
+ *                          v
+ * +----------------------+
+ * | Processing Rules     |
+ * | (action idx,         |
+ * |  matching rule set)  |
+ * +----------------------+
+ *            |
+ *            v
+ * +----------------------+
+ * | Action Rules         |
+ * | (Modify Priority,    |
+ * |  Forwarding Map,     |
+ * |  VLAN tag, etc)      |
+ * +----------------------+
+ */
+
+#include <linux/bitops.h>
+
+#include "ksz9477.h"
+#include "ksz9477_reg.h"
+#include "ksz_common.h"
+
+#define KSZ9477_PORT_ACL_0 0x600
+
+enum ksz9477_acl_port_access {
+ KSZ9477_ACL_PORT_ACCESS_0 = 0x00,
+ KSZ9477_ACL_PORT_ACCESS_1 = 0x01,
+ KSZ9477_ACL_PORT_ACCESS_2 = 0x02,
+ KSZ9477_ACL_PORT_ACCESS_3 = 0x03,
+ KSZ9477_ACL_PORT_ACCESS_4 = 0x04,
+ KSZ9477_ACL_PORT_ACCESS_5 = 0x05,
+ KSZ9477_ACL_PORT_ACCESS_6 = 0x06,
+ KSZ9477_ACL_PORT_ACCESS_7 = 0x07,
+ KSZ9477_ACL_PORT_ACCESS_8 = 0x08,
+ KSZ9477_ACL_PORT_ACCESS_9 = 0x09,
+ KSZ9477_ACL_PORT_ACCESS_A = 0x0A,
+ KSZ9477_ACL_PORT_ACCESS_B = 0x0B,
+ KSZ9477_ACL_PORT_ACCESS_C = 0x0C,
+ KSZ9477_ACL_PORT_ACCESS_D = 0x0D,
+ KSZ9477_ACL_PORT_ACCESS_E = 0x0E,
+ KSZ9477_ACL_PORT_ACCESS_F = 0x0F,
+ KSZ9477_ACL_PORT_ACCESS_10 = 0x10,
+ KSZ9477_ACL_PORT_ACCESS_11 = 0x11
+};
+
+#define KSZ9477_ACL_MD_MASK GENMASK(5, 4)
+#define KSZ9477_ACL_MD_DISABLE 0
+#define KSZ9477_ACL_MD_L2_MAC 1
+#define KSZ9477_ACL_MD_L3_IP 2
+#define KSZ9477_ACL_MD_L4_TCP_UDP 3
+
+#define KSZ9477_ACL_ENB_MASK GENMASK(3, 2)
+#define KSZ9477_ACL_ENB_L2_COUNTER 0
+#define KSZ9477_ACL_ENB_L2_TYPE 1
+#define KSZ9477_ACL_ENB_L2_MAC 2
+#define KSZ9477_ACL_ENB_L2_MAC_TYPE 3
+
+/* only IPv4 src or dst can be used with mask */
+#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_MASK 1
+/* only IPv4 src and dst can be used without mask */
+#define KSZ9477_ACL_ENB_L3_IPV4_ADDR_SRC_DST 2
+
+#define KSZ9477_ACL_ENB_L4_IP_PROTO 0
+#define KSZ9477_ACL_ENB_L4_TCP_SRC_DST_PORT 1
+#define KSZ9477_ACL_ENB_L4_UDP_SRC_DST_PORT 2
+#define KSZ9477_ACL_ENB_L4_TCP_SEQ_NUMBER 3
+
+#define KSZ9477_ACL_SD_SRC BIT(1)
+#define KSZ9477_ACL_SD_DST 0
+#define KSZ9477_ACL_EQ_EQUAL BIT(0)
+#define KSZ9477_ACL_EQ_NOT_EQUAL 0
+
+#define KSZ9477_ACL_PM_M GENMASK(7, 6)
+#define KSZ9477_ACL_PM_DISABLE 0
+#define KSZ9477_ACL_PM_HIGHER 1
+#define KSZ9477_ACL_PM_LOWER 2
+#define KSZ9477_ACL_PM_REPLACE 3
+#define KSZ9477_ACL_P_M GENMASK(5, 3)
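+
+/* Illustrative sketch (editor's note, not driver code): how the fields
+ * above combine into the first matching-rule byte. A Layer 2 source-MAC
+ * "equal" match could be encoded as
+ *
+ *	val1 = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) |
+ *	       FIELD_PREP(KSZ9477_ACL_ENB_MASK, KSZ9477_ACL_ENB_L2_MAC) |
+ *	       KSZ9477_ACL_SD_SRC | KSZ9477_ACL_EQ_EQUAL;
+ *
+ * which yields 0x1b: MD = 01 (L2 MAC), ENB = 10 (MAC), SD = src,
+ * EQ = equal.
+ */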
+
+#define KSZ9477_PORT_ACL_CTRL_0 0x0612
+
+#define KSZ9477_ACL_WRITE_DONE BIT(6)
+#define KSZ9477_ACL_READ_DONE BIT(5)
+#define KSZ9477_ACL_WRITE BIT(4)
+#define KSZ9477_ACL_INDEX_M GENMASK(3, 0)
+
+/**
+ * ksz9477_dump_acl_index - Print the ACL entry at the specified index
+ *
+ * @dev: Pointer to the ksz9477 device structure.
+ * @acle: Pointer to the ACL entry array.
+ * @index: The index of the ACL entry to print.
+ *
+ * This function prints the details of the ACL entry located at a particular
+ * index within the ksz9477 device's ACL table. Empty entries are not
+ * printed.
+ *
+ * Return: 1 if the entry is non-empty and printed, 0 otherwise.
+ */
+static int ksz9477_dump_acl_index(struct ksz_device *dev,
+ struct ksz9477_acl_entry *acle, int index)
+{
+ bool empty = true;
+ char buf[64];
+ u8 *entry;
+ int i;
+
+ entry = &acle[index].entry[0];
+ for (i = 0; i <= KSZ9477_ACL_PORT_ACCESS_11; i++) {
+ if (entry[i])
+ empty = false;
+
+ sprintf(buf + (i * 3), "%02x ", entry[i]);
+ }
+
+ /* no need to print empty entries */
+ if (empty)
+ return 0;
+
+ dev_err(dev->dev, " Entry %02d, prio: %02d : %s", index,
+ acle[index].prio, buf);
+
+ return 1;
+}
+
+/**
+ * ksz9477_dump_acl - Print ACL entries
+ *
+ * @dev: Pointer to the device structure.
+ * @acle: Pointer to the ACL entry array.
+ */
+static void ksz9477_dump_acl(struct ksz_device *dev,
+ struct ksz9477_acl_entry *acle)
+{
+ int count = 0;
+ int i;
+
+ for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES; i++)
+ count += ksz9477_dump_acl_index(dev, acle, i);
+
+ if (count != KSZ9477_ACL_MAX_ENTRIES - 1)
+ dev_err(dev->dev, " Empty ACL entries were skipped\n");
+}
+
+/**
+ * ksz9477_acl_is_valid_matching_rule - Check if an ACL entry contains a valid
+ * matching rule.
+ *
+ * @entry: Pointer to ACL entry buffer
+ *
+ * This function checks if the given ACL entry buffer contains a valid
+ * matching rule by inspecting the Mode (MD) and Enable (ENB) fields.
+ *
+ * Returns: True if it's a valid matching rule, false otherwise.
+ */
+static bool ksz9477_acl_is_valid_matching_rule(u8 *entry)
+{
+ u8 val1, md, enb;
+
+ val1 = entry[KSZ9477_ACL_PORT_ACCESS_1];
+
+ md = FIELD_GET(KSZ9477_ACL_MD_MASK, val1);
+ if (md == KSZ9477_ACL_MD_DISABLE)
+ return false;
+
+ if (md == KSZ9477_ACL_MD_L2_MAC) {
+ /* The L2 counter is not supported, so it is not a valid rule for now */
+ enb = FIELD_GET(KSZ9477_ACL_ENB_MASK, val1);
+ if (enb == KSZ9477_ACL_ENB_L2_COUNTER)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ksz9477_acl_get_cont_entr - Get count of contiguous ACL entries and validate
+ * the matching rules.
+ * @dev: Pointer to the KSZ9477 device structure.
+ * @port: Port number.
+ * @index: Index of the starting ACL entry.
+ *
+ * Based on the KSZ9477 switch's Access Control List (ACL) system, the RuleSet
+ * in an ACL entry indicates which entries contain Matching rules linked to it.
+ * This RuleSet is represented by two registers: KSZ9477_ACL_PORT_ACCESS_E and
+ * KSZ9477_ACL_PORT_ACCESS_F. Each bit set in these registers corresponds to
+ * an entry containing a Matching rule for this RuleSet.
+ *
+ * For a single Matching rule linked, only one bit is set. However, when an
+ * entry links multiple Matching rules, forming what's termed a 'complex rule',
+ * multiple bits are set in these registers.
+ *
+ * This function checks that, for complex rules, the entries containing the
+ * linked Matching rules are contiguous in terms of their indices. It calculates
+ * and returns the number of these contiguous entries.
+ *
+ * Returns:
+ * - 0 if the entry is empty and can be safely overwritten
+ * - 1 if the entry represents a simple rule
+ * - The number of contiguous entries if it is the root entry of a complex
+ * rule
+ * - -ENOTEMPTY if the entry is part of a complex rule but not the root
+ * entry
+ * - -EINVAL if the validation fails
+ */
+static int ksz9477_acl_get_cont_entr(struct ksz_device *dev, int port,
+ int index)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int start_idx, end_idx, contiguous_count;
+ unsigned long val;
+ u8 vale, valf;
+ u8 *entry;
+ int i;
+
+ entry = &acles->entries[index].entry[0];
+ vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ val = (vale << 8) | valf;
+
+ /* If no bits are set, return an appropriate value or error */
+ if (!val) {
+ if (ksz9477_acl_is_valid_matching_rule(entry)) {
+ /* Looks like we are about to corrupt some complex rule.
+ * Do not print an error here, as this is a normal case
+ * when we are trying to find a free or starting entry.
+ */
+ dev_dbg(dev->dev, "ACL: entry %d holds a valid matching rule, but no bits are set in RuleSet\n",
+ index);
+ return -ENOTEMPTY;
+ }
+
+ /* This entry does not contain a valid matching rule */
+ return 0;
+ }
+
+ start_idx = find_first_bit((unsigned long *)&val, 16);
+ end_idx = find_last_bit((unsigned long *)&val, 16);
+
+ /* Calculate the contiguous count */
+ contiguous_count = end_idx - start_idx + 1;
+
+ /* Check if the number of bits set in val matches our calculated count */
+ if (contiguous_count != hweight16(val)) {
+ /* Probably we have a fragmented complex rule, which is not
+ * supported by this driver.
+ */
+ dev_err(dev->dev, "ACL: number of bits set in RuleSet does not match calculated count\n");
+ return -EINVAL;
+ }
+
+ /* loop over the contiguous entries and check for valid matching rules */
+ for (i = start_idx; i <= end_idx; i++) {
+ u8 *current_entry = &acles->entries[i].entry[0];
+
+ if (!ksz9477_acl_is_valid_matching_rule(current_entry)) {
+ /* We have something linked without a valid matching
+ * rule - the ACL table is likely corrupted.
+ */
+ dev_err(dev->dev, "ACL: entry %d does not contain a valid matching rule\n",
+ i);
+ return -EINVAL;
+ }
+
+ if (i > start_idx) {
+ vale = current_entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = current_entry[KSZ9477_ACL_PORT_ACCESS_F];
+ /* Following entry should have empty linkage list */
+ if (vale || valf) {
+ dev_err(dev->dev, "ACL: entry %d has non-empty RuleSet linkage\n",
+ i);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return contiguous_count;
+}
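+
+/* Worked example (illustrative, editor's note): with vale = 0x00 and
+ * valf = 0x0c the combined RuleSet is 0x000c, i.e. bits 2 and 3.
+ * start_idx = 2 and end_idx = 3, so a contiguous block of 2 entries is
+ * reported, provided both entries hold valid matching rules.
+ */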
+
+/**
+ * ksz9477_acl_update_linkage - Update the RuleSet linkage for an ACL entry
+ * after a move operation.
+ *
+ * @dev: Pointer to the ksz_device.
+ * @entry: Pointer to the ACL entry array.
+ * @old_idx: The original index of the ACL entry before moving.
+ * @new_idx: The new index of the ACL entry after moving.
+ *
+ * This function updates the RuleSet linkage bits for an ACL entry when
+ * it's moved from one position to another in the ACL table. The RuleSet
+ * linkage is represented by two 8-bit registers, which are combined
+ * into a 16-bit value for easier manipulation. The linkage bits are shifted
+ * based on the difference between the old and new index. If any bits are lost
+ * during the shift operation, an error is returned.
+ *
+ * Note: Fragmentation within a RuleSet is not supported. Hence, entries must
+ * be moved as complete blocks, maintaining the integrity of the RuleSet.
+ *
+ * Returns: 0 on success, or -EINVAL if any RuleSet linkage bits are lost
+ * during the move.
+ */
+static int ksz9477_acl_update_linkage(struct ksz_device *dev, u8 *entry,
+ u16 old_idx, u16 new_idx)
+{
+ unsigned int original_bit_count;
+ unsigned long rule_linkage;
+ u8 vale, valf, val0;
+ int shift;
+
+ val0 = entry[KSZ9477_ACL_PORT_ACCESS_0];
+ vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ /* Combine the two u8 values into one u16 for easier manipulation */
+ rule_linkage = (vale << 8) | valf;
+ original_bit_count = hweight16(rule_linkage);
+
+ /* Even if the HW were able to handle a fragmented RuleSet, we don't
+ * support it. The RuleSet is filled only for the first entry of the
+ * set.
+ */
+ if (!rule_linkage)
+ return 0;
+
+ if (val0 != old_idx) {
+ dev_err(dev->dev, "ACL: entry %d has unexpected ActionRule linkage: %d\n",
+ old_idx, val0);
+ return -EINVAL;
+ }
+
+ val0 = new_idx;
+
+ /* Calculate the number of positions to shift */
+ shift = new_idx - old_idx;
+
+ /* Shift the RuleSet */
+ if (shift > 0)
+ rule_linkage <<= shift;
+ else
+ rule_linkage >>= -shift;
+
+ /* Check that no bits were lost in the process */
+ if (original_bit_count != hweight16(rule_linkage)) {
+ dev_err(dev->dev, "ACL RuleSet linkage bits lost during move\n");
+ return -EINVAL;
+ }
+
+ entry[KSZ9477_ACL_PORT_ACCESS_0] = val0;
+
+ /* Update the RuleSet bitfields in the entry */
+ entry[KSZ9477_ACL_PORT_ACCESS_E] = (rule_linkage >> 8) & 0xFF;
+ entry[KSZ9477_ACL_PORT_ACCESS_F] = rule_linkage & 0xFF;
+
+ return 0;
+}
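+
+/* Worked example (illustrative, editor's note): a root entry at index 2
+ * whose RuleSet covers entries 2-3 carries rule_linkage = 0x000c. Moving
+ * it to index 6 shifts the linkage by +4 to 0x00c0 (bits 6-7); the bit
+ * count is unchanged, so the move is accepted and the ActionRule index in
+ * ACCESS_0 is rewritten from 2 to 6.
+ */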
+
+/**
+ * ksz9477_validate_and_get_src_count - Validate source and destination indices
+ * and determine the source entry count.
+ * @dev: Pointer to the KSZ device structure.
+ * @port: Port number on the KSZ device where the ACL entries reside.
+ * @src_idx: Index of the starting ACL entry that needs to be validated.
+ * @dst_idx: Index of the destination where the source entries are intended to
+ * be moved.
+ * @src_count: Pointer to the variable that will hold the number of contiguous
+ * source entries if the validation passes.
+ * @dst_count: Pointer to the variable that will hold the number of contiguous
+ * destination entries if the validation passes.
+ *
+ * This function performs validation on the source and destination indices
+ * provided for ACL entries. It checks if the indices are within the valid
+ * range, and if the source entries are contiguous. Additionally, the function
+ * ensures that there's adequate space at the destination for the source entries
+ * and that the destination index isn't in the middle of a RuleSet. If all
+ * validations pass, the function returns the number of contiguous source and
+ * destination entries.
+ *
+ * Return: 0 on success, otherwise returns a negative error code if any
+ * validation check fails.
+ */
+static int ksz9477_validate_and_get_src_count(struct ksz_device *dev, int port,
+ int src_idx, int dst_idx,
+ int *src_count, int *dst_count)
+{
+ int ret;
+
+ if (src_idx >= KSZ9477_ACL_MAX_ENTRIES ||
+ dst_idx >= KSZ9477_ACL_MAX_ENTRIES) {
+ dev_err(dev->dev, "ACL: invalid entry index\n");
+ return -EINVAL;
+ }
+
+ /* Validate if the source entries are contiguous */
+ ret = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+ if (ret < 0)
+ return ret;
+ *src_count = ret;
+
+ if (!*src_count) {
+ dev_err(dev->dev, "ACL: source entry is empty\n");
+ return -EINVAL;
+ }
+
+ if (dst_idx + *src_count >= KSZ9477_ACL_MAX_ENTRIES) {
+ dev_err(dev->dev, "ACL: Not enough space at the destination. Move operation will fail.\n");
+ return -EINVAL;
+ }
+
+ /* Validate if the destination entry is empty or not in the middle of
+ * a RuleSet.
+ */
+ ret = ksz9477_acl_get_cont_entr(dev, port, dst_idx);
+ if (ret < 0)
+ return ret;
+ *dst_count = ret;
+
+ return 0;
+}
+
+/**
+ * ksz9477_move_entries_downwards - Move a range of ACL entries downwards in
+ * the list.
+ * @dev: Pointer to the KSZ device structure.
+ * @acles: Pointer to the structure encapsulating all the ACL entries.
+ * @start_idx: Starting index of the entries to be relocated.
+ * @num_entries_to_move: Number of consecutive entries to be relocated.
+ * @end_idx: Destination index where the first entry should be situated post
+ * relocation.
+ *
+ * This function is responsible for rearranging a specific block of ACL entries
+ * by shifting them downwards in the list based on the supplied source and
+ * destination indices. It ensures that the linkage between the ACL entries is
+ * maintained accurately after the relocation.
+ *
+ * Return: 0 on successful relocation of entries, otherwise returns a negative
+ * error code.
+ */
+static int ksz9477_move_entries_downwards(struct ksz_device *dev,
+ struct ksz9477_acl_entries *acles,
+ u16 start_idx,
+ u16 num_entries_to_move,
+ u16 end_idx)
+{
+ struct ksz9477_acl_entry *e;
+ int ret, i;
+
+ for (i = start_idx; i < end_idx; i++) {
+ e = &acles->entries[i];
+ *e = acles->entries[i + num_entries_to_move];
+
+ ret = ksz9477_acl_update_linkage(dev, &e->entry[0],
+ i + num_entries_to_move, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_move_entries_upwards - Move a range of ACL entries upwards in the
+ * list.
+ * @dev: Pointer to the KSZ device structure.
+ * @acles: Pointer to the structure holding all the ACL entries.
+ * @start_idx: The starting index of the entries to be moved.
+ * @num_entries_to_move: Number of contiguous entries to be moved.
+ * @target_idx: The destination index where the first entry should be placed
+ * after moving.
+ *
+ * This function rearranges a chunk of ACL entries by moving them upwards
+ * in the list based on the given source and destination indices. The reordering
+ * process preserves the linkage between entries by updating it accordingly.
+ *
+ * Return: 0 if the entries were successfully moved, otherwise a negative error
+ * code.
+ */
+static int ksz9477_move_entries_upwards(struct ksz_device *dev,
+ struct ksz9477_acl_entries *acles,
+ u16 start_idx, u16 num_entries_to_move,
+ u16 target_idx)
+{
+ struct ksz9477_acl_entry *e;
+ int ret, i, b;
+
+ for (i = start_idx; i > target_idx; i--) {
+ b = i + num_entries_to_move - 1;
+
+ e = &acles->entries[b];
+ *e = acles->entries[i - 1];
+
+ ret = ksz9477_acl_update_linkage(dev, &e->entry[0], i - 1, b);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_acl_move_entries - Move a block of contiguous ACL entries from a
+ * source to a destination index.
+ * @dev: Pointer to the KSZ9477 device structure.
+ * @port: Port number.
+ * @src_idx: Index of the starting source ACL entry.
+ * @dst_idx: Index of the starting destination ACL entry.
+ *
+ * This function aims to move a block of contiguous ACL entries from the source
+ * index to the destination index while ensuring the integrity and validity of
+ * the ACL table.
+ *
+ * In case of any errors during the adjustments or copying, the function will
+ * restore the ACL entries to their original state from the backup.
+ *
+ * Return: 0 if the move operation is successful. Returns -EINVAL for validation
+ * errors or other error codes based on specific failure conditions.
+ */
+static int ksz9477_acl_move_entries(struct ksz_device *dev, int port,
+ u16 src_idx, u16 dst_idx)
+{
+ struct ksz9477_acl_entry buffer[KSZ9477_ACL_MAX_ENTRIES];
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int src_count, ret, dst_count;
+
+ /* Nothing to do */
+ if (src_idx == dst_idx)
+ return 0;
+
+ ret = ksz9477_validate_and_get_src_count(dev, port, src_idx, dst_idx,
+ &src_count, &dst_count);
+ if (ret)
+ return ret;
+
+ /* In case dst_idx is greater than src_idx, we need to adjust the
+ * destination index to account for the entries that will be moved
+ * downwards and the size of the entry located at dst_idx.
+ */
+ if (dst_idx > src_idx)
+ dst_idx = dst_idx + dst_count - src_count;
+
+ /* Copy source block to buffer and update its linkage */
+ for (int i = 0; i < src_count; i++) {
+ buffer[i] = acles->entries[src_idx + i];
+ ret = ksz9477_acl_update_linkage(dev, &buffer[i].entry[0],
+ src_idx + i, dst_idx + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Adjust other entries and their linkage based on destination */
+ if (dst_idx > src_idx) {
+ ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+ src_count, dst_idx);
+ } else {
+ ret = ksz9477_move_entries_upwards(dev, acles, src_idx,
+ src_count, dst_idx);
+ }
+ if (ret < 0)
+ return ret;
+
+ /* Copy buffer to destination block */
+ for (int i = 0; i < src_count; i++)
+ acles->entries[dst_idx + i] = buffer[i];
+
+ return 0;
+}
+
+/**
+ * ksz9477_get_next_block_start - Identify the starting index of the next ACL
+ * block.
+ * @dev: Pointer to the device structure.
+ * @port: The port number on which the ACL entries are being checked.
+ * @start: The starting index from which the search begins.
+ *
+ * This function looks for the next valid ACL block starting from the provided
+ * 'start' index and returns the beginning index of that block. If the block is
+ * invalid or if it reaches the end of the ACL entries without finding another
+ * block, it returns the maximum ACL entries count.
+ *
+ * Returns:
+ * - The starting index of the next valid ACL block.
+ * - KSZ9477_ACL_MAX_ENTRIES if no other valid blocks are found after 'start'.
+ * - A negative error code if an error occurs while checking.
+ */
+static int ksz9477_get_next_block_start(struct ksz_device *dev, int port,
+ int start)
+{
+ int block_size;
+
+ for (int i = start; i < KSZ9477_ACL_MAX_ENTRIES;) {
+ block_size = ksz9477_acl_get_cont_entr(dev, port, i);
+ if (block_size < 0 && block_size != -ENOTEMPTY)
+ return block_size;
+
+ if (block_size > 0)
+ return i;
+
+ i++;
+ }
+ return KSZ9477_ACL_MAX_ENTRIES;
+}
+
+/**
+ * ksz9477_swap_acl_blocks - Swap two ACL blocks
+ * @dev: Pointer to the device structure.
+ * @port: The port number on which the ACL blocks are to be swapped.
+ * @i: The starting index of the first ACL block.
+ * @j: The starting index of the second ACL block.
+ *
+ * This function is used to swap two ACL blocks present at given indices. The
+ * main purpose is to aid in the sorting and reordering of ACL blocks based on
+ * certain criteria, e.g., priority. It checks the validity of the block at
+ * index 'i', ensuring it's not an empty block, and then proceeds to swap it
+ * with the block at index 'j'.
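+ *
+ * The swap is done as two moves (a sketch of the mechanics): the block at
+ * index 'i' is first moved to 'j'; the block that originally started at 'j'
+ * then ends up at 'j' minus the size of the first block, from where it is
+ * moved back to 'i'.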
+ *
+ * Returns:
+ * - 0 on successful swapping of blocks.
+ * - -EINVAL if the block at index 'i' is empty.
+ * - A negative error code if any other error occurs during the swap.
+ */
+static int ksz9477_swap_acl_blocks(struct ksz_device *dev, int port, int i,
+ int j)
+{
+ int ret, current_block_size;
+
+ current_block_size = ksz9477_acl_get_cont_entr(dev, port, i);
+ if (current_block_size < 0)
+ return current_block_size;
+
+ if (!current_block_size) {
+ dev_err(dev->dev, "ACL: swapping empty entry %d\n", i);
+ return -EINVAL;
+ }
+
+ ret = ksz9477_acl_move_entries(dev, port, i, j);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_acl_move_entries(dev, port, j - current_block_size, i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * ksz9477_sort_acl_entr_no_back - Sort ACL entries for a given port based on
+ * priority without backing up entries.
+ * @dev: Pointer to the device structure.
+ * @port: The port number whose ACL entries need to be sorted.
+ *
+ * This function sorts ACL entries of the specified port using a variant of the
+ * bubble sort algorithm. It operates on blocks of ACL entries rather than
+ * individual entries. Each block's starting point is identified and then
+ * compared with subsequent blocks based on their priority. If the current
+ * block has a numerically higher prio value (i.e. a lower priority) than the
+ * subsequent block, the two blocks are swapped.
+ *
+ * This is done in order to maintain an organized order of ACL entries based on
+ * priority, ensuring efficient and predictable ACL rule application.
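+ *
+ * For example (illustrative): blocks with prio values {20, 10, 30} end up
+ * ordered as {10, 20, 30}, i.e. blocks are sorted in ascending order of
+ * their prio value.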
+ *
+ * Returns:
+ * - 0 on successful sorting of entries.
+ * - A negative error code if any issue arises during sorting, e.g.,
+ * if the function is unable to get the next block start.
+ */
+static int ksz9477_sort_acl_entr_no_back(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *curr, *next;
+ int i, j, ret;
+
+ /* Bubble sort */
+ for (i = 0; i < KSZ9477_ACL_MAX_ENTRIES;) {
+ curr = &acles->entries[i];
+
+ j = ksz9477_get_next_block_start(dev, port, i + 1);
+ if (j < 0)
+ return j;
+
+ while (j < KSZ9477_ACL_MAX_ENTRIES) {
+ next = &acles->entries[j];
+
+ if (curr->prio > next->prio) {
+ ret = ksz9477_swap_acl_blocks(dev, port, i, j);
+ if (ret)
+ return ret;
+ }
+
+ j = ksz9477_get_next_block_start(dev, port, j + 1);
+ if (j < 0)
+ return j;
+ }
+
+ i = ksz9477_get_next_block_start(dev, port, i + 1);
+ if (i < 0)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_sort_acl_entries - Sort the ACL entries for a given port.
+ * @dev: Pointer to the KSZ device.
+ * @port: Port number.
+ *
+ * This function sorts the Access Control List (ACL) entries for a specified
+ * port. Before sorting, a backup of the original entries is created. If the
+ * sorting process fails, the function will log error messages displaying both
+ * the original and attempted sorted entries, and then restore the original
+ * entries from the backup.
+ *
+ * Return: 0 if the sorting succeeds, otherwise a negative error code.
+ */
+int ksz9477_sort_acl_entries(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_entry backup[KSZ9477_ACL_MAX_ENTRIES];
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int ret;
+
+ /* Create a backup of the ACL entries; if something goes wrong,
+ * we can restore them.
+ */
+ memcpy(backup, acles->entries, sizeof(backup));
+
+ ret = ksz9477_sort_acl_entr_no_back(dev, port);
+ if (ret) {
+ dev_err(dev->dev, "ACL: failed to sort entries for port %d\n",
+ port);
+ dev_err(dev->dev, "ACL dump before sorting:\n");
+ ksz9477_dump_acl(dev, backup);
+ dev_err(dev->dev, "ACL dump after sorting:\n");
+ ksz9477_dump_acl(dev, acles->entries);
+ /* Restore the original entries */
+ memcpy(acles->entries, backup, sizeof(backup));
+ }
+
+ return ret;
+}
+
+/**
+ * ksz9477_acl_wait_ready - Waits for the ACL operation to complete on a given
+ * port.
+ * @dev: The ksz_device instance.
+ * @port: The port number to wait for.
+ *
+ * This function checks if the ACL write or read operation is completed by
+ * polling the specified register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_wait_ready(struct ksz_device *dev, int port)
+{
+ unsigned int wr_mask = KSZ9477_ACL_WRITE_DONE | KSZ9477_ACL_READ_DONE;
+ unsigned int val, reg;
+ int ret;
+
+ reg = dev->dev_ops->get_port_addr(port, KSZ9477_PORT_ACL_CTRL_0);
+
+ ret = regmap_read_poll_timeout(dev->regmap[0], reg, val,
+ (val & wr_mask) == wr_mask, 1000, 10000);
+ if (ret)
+ dev_err(dev->dev, "Failed to read/write ACL table\n");
+
+ return ret;
+}
+
+/**
+ * ksz9477_acl_entry_write - Writes an ACL entry to a given port at the
+ * specified index.
+ * @dev: The ksz_device instance.
+ * @port: The port number to write the ACL entry to.
+ * @entry: A pointer to the ACL entry data.
+ * @idx: The index at which to write the ACL entry.
+ *
+ * This function writes the provided ACL entry to the specified port at the
+ * given index.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_entry_write(struct ksz_device *dev, int port, u8 *entry,
+ int idx)
+{
+ int ret, i;
+ u8 val;
+
+ for (i = 0; i < KSZ9477_ACL_ENTRY_SIZE; i++) {
+ ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_0 + i, entry[i]);
+ if (ret) {
+ dev_err(dev->dev, "Failed to write ACL entry %d\n", i);
+ return ret;
+ }
+ }
+
+ /* write everything down */
+ val = FIELD_PREP(KSZ9477_ACL_INDEX_M, idx) | KSZ9477_ACL_WRITE;
+ ret = ksz_pwrite8(dev, port, KSZ9477_PORT_ACL_CTRL_0, val);
+ if (ret)
+ return ret;
+
+ /* wait until everything is written */
+ return ksz9477_acl_wait_ready(dev, port);
+}
+
+/**
+ * ksz9477_acl_port_enable - Enables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to enable ACL functionality.
+ *
+ * This function enables ACL functionality on the specified port by configuring
+ * the appropriate control registers. It returns 0 if the operation is
+ * successful, or a negative error code if an error occurs.
+ *
+ * 0xn801 - KSZ9477S 5.2.8.2 Port Priority Control Register
+ * Bit 7 - Highest Priority
+ * Bit 6 - OR'ed Priority
+ * Bit 4 - MAC Address Priority Classification
+ * Bit 3 - VLAN Priority Classification
+ * Bit 2 - 802.1p Priority Classification
+ * Bit 1 - Diffserv Priority Classification
+ * Bit 0 - ACL Priority Classification
+ *
+ * Current driver implementation sets 802.1p priority classification by default.
+ * In this function we add ACL priority classification with OR'ed priority.
+ * According to testing, priority set by ACL will supersede the 802.1p priority.
+ *
+ * 0xn803 - KSZ9477S 5.2.8.4 Port Authentication Control Register
+ * Bit 2 - Access Control List (ACL) Enable
+ * Bits 1:0 - Authentication Mode
+ * 00 = Reserved
+ * 01 = Block Mode. Authentication is enabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * blocked; otherwise ACL actions apply.
+ * 10 = Pass Mode. Authentication is disabled. When ACL is
+ * enabled, all traffic that misses the ACL rules is
+ * forwarded; otherwise ACL actions apply.
+ * 11 = Trap Mode. Authentication is enabled. All traffic is
+ * forwarded to the host port. When ACL is enabled, all
+ * traffic that misses the ACL rules is blocked; otherwise
+ * ACL actions apply.
+ *
+ * We are using Pass Mode in this function.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_enable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, 0, PORT_ACL_PRIO_ENABLE |
+ PORT_OR_PRIO);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL,
+ PORT_ACL_ENABLE |
+ FIELD_PREP(PORT_AUTHEN_MODE, PORT_AUTHEN_PASS));
+}
+
+/**
+ * ksz9477_acl_port_disable - Disables ACL functionality on a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to disable ACL functionality.
+ *
+ * This function disables ACL functionality on the specified port by writing a
+ * value of 0 to the REG_PORT_MRI_AUTHEN_CTRL control register and removing the
+ * PORT_ACL_PRIO_ENABLE bit from the P_PRIO_CTRL register.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+static int ksz9477_acl_port_disable(struct ksz_device *dev, int port)
+{
+ int ret;
+
+ ret = ksz_prmw8(dev, port, P_PRIO_CTRL, PORT_ACL_PRIO_ENABLE, 0);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, REG_PORT_MRI_AUTHEN_CTRL, 0);
+}
+
+/**
+ * ksz9477_acl_write_list - Write a list of ACL entries to a given port.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to write ACL entries.
+ *
+ * This function enables ACL functionality on the specified port, writes a list
+ * of ACL entries to the port, and disables ACL functionality if there are no
+ * entries.
+ *
+ * Returns: 0 if the operation is successful, or a negative error code if an
+ * error occurs.
+ */
+int ksz9477_acl_write_list(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ int ret, i;
+
+ /* ACL should be enabled before writing entries */
+ ret = ksz9477_acl_port_enable(dev, port);
+ if (ret)
+ return ret;
+
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Check if the entry was removed and is already synced with
+ * the HW. If the last fields of the entry are not zero, it was
+ * removed locally but not yet synced with the HW, so we write
+ * it down to the HW to remove it there as well.
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] == 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] == 0)
+ continue;
+
+ ret = ksz9477_acl_entry_write(dev, port, entry, i);
+ if (ret)
+ return ret;
+
+ /* The removed entry is now clean on the HW side, so it can
+ * be cleaned in the cache too
+ */
+ if (i >= acles->entries_count &&
+ entry[KSZ9477_ACL_PORT_ACCESS_10] != 0 &&
+ entry[KSZ9477_ACL_PORT_ACCESS_11] != 0) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+ }
+ }
+
+ if (!acles->entries_count)
+ return ksz9477_acl_port_disable(dev, port);
+
+ return 0;
+}
+
+/**
+ * ksz9477_acl_remove_entries - Remove ACL entries with a given cookie from a
+ * specified ksz9477_acl_entries structure.
+ * @dev: The ksz_device instance.
+ * @port: The port number on which to remove ACL entries.
+ * @acles: The ksz9477_acl_entries instance.
+ * @cookie: The cookie value to match for entry removal.
+ *
+ * This function iterates through the entries array, removing any entries with
+ * a matching cookie value. The remaining entries are then shifted down to fill
+ * the gap.
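+ *
+ * For example (illustrative): removing a two-entry block at index 1 from a
+ * five-entry list shifts the entries at indices 3..4 down to 1..2 and zeroes
+ * the now-unused slots 3..4.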
+ */
+void ksz9477_acl_remove_entries(struct ksz_device *dev, int port,
+ struct ksz9477_acl_entries *acles,
+ unsigned long cookie)
+{
+ int entries_count = acles->entries_count;
+ int ret, i, src_count;
+ int src_idx = -1;
+
+ if (!entries_count)
+ return;
+
+ /* Search for the first position with the cookie */
+ for (i = 0; i < entries_count; i++) {
+ if (acles->entries[i].cookie == cookie) {
+ src_idx = i;
+ break;
+ }
+ }
+
+ /* No entries with the matching cookie found */
+ if (src_idx == -1)
+ return;
+
+ /* Get the size of the cookie entry. We may have complex entries. */
+ src_count = ksz9477_acl_get_cont_entr(dev, port, src_idx);
+ if (src_count <= 0)
+ return;
+
+ /* Move all entries down to overwrite the removed entries matching the cookie */
+ ret = ksz9477_move_entries_downwards(dev, acles, src_idx,
+ src_count,
+ entries_count - src_count);
+ if (ret) {
+ dev_err(dev->dev, "Failed to move ACL entries down\n");
+ return;
+ }
+
+ /* Overwrite the now-empty places at the end of the list with zeros to
+ * make sure no unexpected behavior occurs and no unexplored hardware
+ * quirks surface.
+ */
+ for (i = entries_count - src_count; i < entries_count; i++) {
+ struct ksz9477_acl_entry *entry = &acles->entries[i];
+
+ memset(entry, 0, sizeof(*entry));
+
+ /* Set all access bits to be able to write zeroed entry to HW */
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ /* Adjust the total entries count */
+ acles->entries_count -= src_count;
+}
+
+/**
+ * ksz9477_port_acl_init - Initialize the ACL for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to initialize the ACL for.
+ *
+ * This function allocates memory for an acl structure, associates it with the
+ * specified port, and initializes the ACL entries to a default state. The
+ * entries are then written using the ksz9477_acl_write_list function, ensuring
+ * the ACL has a predictable initial hardware state.
+ *
+ * Returns: 0 on success, or an error code on failure.
+ */
+int ksz9477_port_acl_init(struct ksz_device *dev, int port)
+{
+ struct ksz9477_acl_entries *acles;
+ struct ksz9477_acl_priv *acl;
+ int ret, i;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ dev->ports[port].acl_priv = acl;
+
+ acles = &acl->acles;
+ /* write all entries */
+ for (i = 0; i < ARRAY_SIZE(acles->entries); i++) {
+ u8 *entry = acles->entries[i].entry;
+
+ /* Set all access bits to be able to write zeroed
+ * entry
+ */
+ entry[KSZ9477_ACL_PORT_ACCESS_10] = 0xff;
+ entry[KSZ9477_ACL_PORT_ACCESS_11] = 0xff;
+ }
+
+ ret = ksz9477_acl_write_list(dev, port);
+ if (ret)
+ goto free_acl;
+
+ return 0;
+
+free_acl:
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+
+ return ret;
+}
+
+/**
+ * ksz9477_port_acl_free - Free the ACL resources for a specified port on a ksz
+ * device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to free the ACL for.
+ *
+ * This function disables the ACL for the specified port and frees the
+ * associated memory.
+ */
+void ksz9477_port_acl_free(struct ksz_device *dev, int port)
+{
+ if (!dev->ports[port].acl_priv)
+ return;
+
+ ksz9477_acl_port_disable(dev, port);
+
+ kfree(dev->ports[port].acl_priv);
+ dev->ports[port].acl_priv = NULL;
+}
+
+/**
+ * ksz9477_acl_set_reg - Set entry[0x10] and entry[0x11] depending on the
+ * updated entry[]
+ * @entry: An array containing the entries
+ * @reg: The register of the entry that needs to be updated
+ * @value: The value to be assigned to the updated entry
+ *
+ * This function updates the entry[] array based on the provided register and
+ * value. It also sets entry[0x10] and entry[0x11] according to the ACL byte
+ * enable rules.
+ *
+ * 0x10 - Byte Enable [15:8]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access 7 Register
+ * Bit 1 applies to the Port ACL Access 6 Register, etc.
+ * Bit 7 applies to the Port ACL Access 0 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
+ *
+ * 0x11 - Byte Enable [7:0]
+ *
+ * Each bit enables accessing one of the ACL bytes when a read or write is
+ * initiated by writing to the Port ACL Byte Enable LSB Register.
+ * Bit 0 applies to the Port ACL Access F Register
+ * Bit 1 applies to the Port ACL Access E Register, etc.
+ * Bit 7 applies to the Port ACL Access 8 Register
+ * 1 = Byte is selected for read/write
+ * 0 = Byte is not selected
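+ *
+ * For example (illustrative): writing to KSZ9477_ACL_PORT_ACCESS_2 stores
+ * @value in entry[0x02] and sets bit 5 (0x7 - 0x2) in entry[0x10], marking
+ * that byte for the next read/write cycle.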
+ */
+static void ksz9477_acl_set_reg(u8 *entry, enum ksz9477_acl_port_access reg,
+ u8 value)
+{
+ if (reg >= KSZ9477_ACL_PORT_ACCESS_0 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_7) {
+ entry[KSZ9477_ACL_PORT_ACCESS_10] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_7 - reg);
+ } else if (reg >= KSZ9477_ACL_PORT_ACCESS_8 &&
+ reg <= KSZ9477_ACL_PORT_ACCESS_F) {
+ entry[KSZ9477_ACL_PORT_ACCESS_11] |=
+ BIT(KSZ9477_ACL_PORT_ACCESS_F - reg);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ entry[reg] = value;
+}
+
+/**
+ * ksz9477_acl_matching_rule_cfg_l2 - Configure an ACL filtering entry to match
+ * L2 types of Ethernet frames
+ * @entry: Pointer to ACL entry buffer
+ * @ethertype: Ethertype value
+ * @eth_addr: Pointer to Ethernet address
+ * @is_src: If true, match the source MAC address; if false, match the
+ * destination MAC address
+ *
+ * This function configures an Access Control List (ACL) filtering
+ * entry to match Layer 2 types of Ethernet frames based on the provided
+ * ethertype and Ethernet address. Additionally, it can match either the source
+ * or destination MAC address depending on the value of the is_src parameter.
+ *
+ * Register Descriptions for MD = 01 and ENB != 00 (Layer 2 MAC header
+ * filtering)
+ *
+ * 0x01 - Mode and Enable
+ * Bits 5:4 - MD (Mode)
+ * 01 = Layer 2 MAC header or counter filtering
+ * Bits 3:2 - ENB (Enable)
+ * 01 = Comparison is performed only on the TYPE value
+ * 10 = Comparison is performed only on the MAC Address value
+ * 11 = Both the MAC Address and TYPE are tested
+ * Bit 1 - S/D (Source / Destination)
+ * 0 = Destination address
+ * 1 = Source address
+ * Bit 0 - EQ (Equal / Not Equal)
+ * 0 = Not Equal produces true result
+ * 1 = Equal produces true result
+ *
+ * 0x02-0x07 - MAC Address
+ * 0x02 - MAC Address [47:40]
+ * 0x03 - MAC Address [39:32]
+ * 0x04 - MAC Address [31:24]
+ * 0x05 - MAC Address [23:16]
+ * 0x06 - MAC Address [15:8]
+ * 0x07 - MAC Address [7:0]
+ *
+ * 0x08-0x09 - EtherType
+ * 0x08 - EtherType [15:8]
+ * 0x09 - EtherType [7:0]
+ */
+static void ksz9477_acl_matching_rule_cfg_l2(u8 *entry, u16 ethertype,
+ u8 *eth_addr, bool is_src)
+{
+ u8 enb = 0;
+ u8 val;
+
+ if (ethertype)
+ enb |= KSZ9477_ACL_ENB_L2_TYPE;
+ if (eth_addr)
+ enb |= KSZ9477_ACL_ENB_L2_MAC;
+
+ val = FIELD_PREP(KSZ9477_ACL_MD_MASK, KSZ9477_ACL_MD_L2_MAC) |
+ FIELD_PREP(KSZ9477_ACL_ENB_MASK, enb) |
+ FIELD_PREP(KSZ9477_ACL_SD_SRC, is_src) | KSZ9477_ACL_EQ_EQUAL;
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_1, val);
+
+ if (eth_addr) {
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ ksz9477_acl_set_reg(entry,
+ KSZ9477_ACL_PORT_ACCESS_2 + i,
+ eth_addr[i]);
+ }
+ }
+
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_8, ethertype >> 8);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_9, ethertype & 0xff);
+}
+
+/**
+ * ksz9477_acl_action_rule_cfg - Set action for an ACL entry
+ * @entry: Pointer to the ACL entry
+ * @force_prio: If true, force the priority value
+ * @prio_val: Priority value
+ *
+ * This function sets the action for the specified ACL entry. It prepares
+ * the priority mode and traffic class values and updates the entry's
+ * action registers accordingly. Currently, there is no port or VLAN PCP
+ * remapping.
+ *
+ * ACL Action Rule Parameters for Non-Count Modes (MD != 01 or ENB != 00)
+ *
+ * 0x0A - PM, P, RPE, RP[2:1]
+ * Bits 7:6 - PM[1:0] - Priority Mode
+ * 00 = ACL does not specify the packet priority. Priority is
+ * determined by standard QoS functions.
+ * 01 = Change packet priority to P[2:0] if it is greater than QoS
+ * result.
+ * 10 = Change packet priority to P[2:0] if it is smaller than the
+ * QoS result.
+ * 11 = Always change packet priority to P[2:0].
+ * Bits 5:3 - P[2:0] - Priority value
+ * Bit 2 - RPE - Remark Priority Enable
+ * 0 = Disable priority remarking
+ * 1 = Enable priority remarking. VLAN tag priority (PCP) bits are
+ * replaced by RP[2:0].
+ * Bits 1:0 - RP[2:1] - Remarked Priority value (bits 2:1)
+ *
+ * 0x0B - RP[0], MM
+ * Bit 7 - RP[0] - Remarked Priority value (bit 0)
+ * Bits 6:5 - MM[1:0] - Map Mode
+ * 00 = No forwarding remapping
+ * 01 = The forwarding map in FORWARD is OR'ed with the forwarding
+ * map from the Address Lookup Table.
+ * 10 = The forwarding map in FORWARD is AND'ed with the forwarding
+ * map from the Address Lookup Table.
+ * 11 = The forwarding map in FORWARD replaces the forwarding map
+ * from the Address Lookup Table.
+ * 0x0D - FORWARD[n:0]
+ * Bits 7:0 - FORWARD[n:0] - Forwarding map. Bit 0 = port 1,
+ * bit 1 = port 2, etc.
+ * 1 = enable forwarding to this port
+ * 0 = do not forward to this port
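+ *
+ * For example (illustrative): with @force_prio set and @prio_val = 4, PM is
+ * programmed to 11 (always change packet priority) and P[2:0] to 100, while
+ * the remark/map-mode and forwarding registers (0x0B, 0x0D) are written as
+ * zero.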
+ */
+void ksz9477_acl_action_rule_cfg(u8 *entry, bool force_prio, u8 prio_val)
+{
+ u8 prio_mode, val;
+
+ if (force_prio)
+ prio_mode = KSZ9477_ACL_PM_REPLACE;
+ else
+ prio_mode = KSZ9477_ACL_PM_DISABLE;
+
+ val = FIELD_PREP(KSZ9477_ACL_PM_M, prio_mode) |
+ FIELD_PREP(KSZ9477_ACL_P_M, prio_val);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_A, val);
+
+ /* no port or VLAN PCP remapping for now */
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_B, 0);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_D, 0);
+}
+
+/**
+ * ksz9477_acl_processing_rule_set_action - Set the action for the processing
+ * rule set.
+ * @entry: Pointer to the ACL entry
+ * @action_idx: Index of the action to be applied
+ *
+ * This function sets the action for the processing rule set by updating the
+ * appropriate register in the entry. There can be only one action per
+ * processing rule.
+ *
+ * Access Control List (ACL) Processing Rule Registers:
+ *
+ * 0x00 - First Rule Number (FRN)
+ * Bits 3:0 - First Rule Number. Pointer to an Action rule entry.
+ */
+void ksz9477_acl_processing_rule_set_action(u8 *entry, u8 action_idx)
+{
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_0, action_idx);
+}
+
+/**
+ * ksz9477_acl_processing_rule_add_match - Add a matching rule to the rule set
+ * @entry: Pointer to the ACL entry
+ * @match_idx: Index of the matching rule to be added
+ *
+ * This function adds a matching rule to the rule set by updating the
+ * appropriate bits in the entry's rule set registers.
+ *
+ * Access Control List (ACL) Processing Rule Registers:
+ *
+ * 0x0E - RuleSet [15:8]
+ * Bits 7:0 - RuleSet [15:8] Specifies a set of one or more Matching rule
+ * entries. RuleSet has one bit for each of the 16 Matching rule entries.
+ * If multiple Matching rules are selected, then all conditions will be
+ * AND'ed to produce a final match result.
+ * 0 = Matching rule not selected
+ * 1 = Matching rule selected
+ *
+ * 0x0F - RuleSet [7:0]
+ * Bits 7:0 - RuleSet [7:0]
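+ *
+ * For example (illustrative): @match_idx = 3 sets bit 3 in the 0x0F
+ * register, while @match_idx = 10 sets bit 2 in the 0x0E register.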
+ */
+static void ksz9477_acl_processing_rule_add_match(u8 *entry, u8 match_idx)
+{
+ u8 vale = entry[KSZ9477_ACL_PORT_ACCESS_E];
+ u8 valf = entry[KSZ9477_ACL_PORT_ACCESS_F];
+
+ if (match_idx < 8)
+ valf |= BIT(match_idx);
+ else
+ vale |= BIT(match_idx - 8);
+
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_E, vale);
+ ksz9477_acl_set_reg(entry, KSZ9477_ACL_PORT_ACCESS_F, valf);
+}
+
+/**
+ * ksz9477_acl_get_init_entry - Get a new uninitialized entry for a specified
+ * port on a ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to get the uninitialized entry for.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority to associate with the entry.
+ *
+ * This function retrieves the next available ACL entry for the specified port,
+ * clears all access flags, and associates it with the provided cookie and
+ * priority.
+ *
+ * Returns: A pointer to the new uninitialized ACL entry.
+ */
+static struct ksz9477_acl_entry *
+ksz9477_acl_get_init_entry(struct ksz_device *dev, int port,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *entry;
+
+ entry = &acles->entries[acles->entries_count];
+ entry->cookie = cookie;
+ entry->prio = prio;
+
+ /* clear all access flags */
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_10] = 0;
+ entry->entry[KSZ9477_ACL_PORT_ACCESS_11] = 0;
+
+ return entry;
+}
+
+/**
+ * ksz9477_acl_match_process_l2 - Configure Layer 2 ACL matching rules and
+ * processing rules.
+ * @dev: Pointer to the ksz_device.
+ * @port: Port number.
+ * @ethtype: Ethernet type.
+ * @src_mac: Source MAC address.
+ * @dst_mac: Destination MAC address.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function sets up matching and processing rules for Layer 2 ACLs.
+ * It takes into account that only one MAC per entry is supported.
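+ *
+ * For example (illustrative): a rule matching on both source and destination
+ * MAC addresses consumes two consecutive entries; the RuleSet bits of the
+ * first entry reference both matching rules so their results are AND'ed.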
+ */
+void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
+ u16 ethtype, u8 *src_mac, u8 *dst_mac,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct ksz9477_acl_entries *acles = &acl->acles;
+ struct ksz9477_acl_entry *entry;
+
+ entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio);
+
+ /* ACL supports only one MAC per entry */
+ if (src_mac && dst_mac) {
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, src_mac,
+ true);
+
+ /* Add both match entries to first processing rule */
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+ acles->entries_count++;
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+
+ entry = ksz9477_acl_get_init_entry(dev, port, cookie, prio);
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, 0, dst_mac,
+ false);
+ acles->entries_count++;
+ } else {
+ u8 *mac = src_mac ? src_mac : dst_mac;
+ bool is_src = src_mac ? true : false;
+
+ ksz9477_acl_matching_rule_cfg_l2(entry->entry, ethtype, mac,
+ is_src);
+ ksz9477_acl_processing_rule_add_match(entry->entry,
+ acles->entries_count);
+ acles->entries_count++;
+ }
+}
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index f3afb8b8c4cc..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series register access through I2C
*
- * Copyright (C) 2018-2019 Microchip Technology Inc.
+ * Copyright (C) 2018-2024 Microchip Technology Inc.
*/
#include <linux/i2c.h>
@@ -14,9 +14,10 @@
KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
-static int ksz9477_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &i2c->dev;
struct regmap_config rc;
struct ksz_device *dev;
int i, ret;
@@ -25,23 +26,29 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
if (!dev)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
+ for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
rc = ksz9477_regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&i2c->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&i2c->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz9477_regmap_config[i].val_bits);
}
}
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
- ret = ksz9477_switch_register(dev);
+ dev->irq = i2c->irq;
+
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -52,16 +59,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int ksz9477_i2c_remove(struct i2c_client *i2c)
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
@@ -71,37 +74,69 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
-
- dsa_switch_shutdown(dev->ds);
+ ksz_switch_shutdown(dev);
i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
- { "ksz9477-switch", 0 },
- {},
+ { "ksz9477-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz9567" },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9563]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
+ .of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
- .probe = ksz9477_i2c_probe,
+ .probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
.shutdown = ksz9477_i2c_shutdown,
.id_table = ksz9477_i2c_id,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 16939f29faa5..61ea11e3338e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -25,7 +25,6 @@
#define REG_CHIP_ID2__1 0x0002
-#define CHIP_ID_63 0x63
#define CHIP_ID_66 0x66
#define CHIP_ID_67 0x67
#define CHIP_ID_77 0x77
@@ -39,11 +38,6 @@
#define SWITCH_REVISION_S 4
#define SWITCH_RESET 0x01
-#define REG_SW_PME_CTRL 0x0006
-
-#define PME_ENABLE BIT(1)
-#define PME_POLARITY BIT(0)
-
#define REG_GLOBAL_OPTIONS 0x000F
#define SW_GIGABIT_ABLE BIT(6)
@@ -113,19 +107,6 @@
#define REG_SW_IBA_SYNC__1 0x010C
-#define REG_SW_IO_STRENGTH__1 0x010D
-#define SW_DRIVE_STRENGTH_M 0x7
-#define SW_DRIVE_STRENGTH_2MA 0
-#define SW_DRIVE_STRENGTH_4MA 1
-#define SW_DRIVE_STRENGTH_8MA 2
-#define SW_DRIVE_STRENGTH_12MA 3
-#define SW_DRIVE_STRENGTH_16MA 4
-#define SW_DRIVE_STRENGTH_20MA 5
-#define SW_DRIVE_STRENGTH_24MA 6
-#define SW_DRIVE_STRENGTH_28MA 7
-#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
-#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
-
#define REG_SW_IBA_STATUS__4 0x0110
#define SW_IBA_REQ BIT(31)
@@ -166,16 +147,9 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define SW_START BIT(0)
-
-#define REG_SW_MAC_ADDR_0 0x0302
-#define REG_SW_MAC_ADDR_1 0x0303
-#define REG_SW_MAC_ADDR_2 0x0304
-#define REG_SW_MAC_ADDR_3 0x0305
-#define REG_SW_MAC_ADDR_4 0x0306
-#define REG_SW_MAC_ADDR_5 0x0307
#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
#define REG_SW_ISP_TPID__2 0x030A
@@ -190,8 +164,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
-#define SW_AGE_CNT_S 3
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -226,6 +199,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
@@ -265,8 +239,8 @@
#define REG_SW_MAC_CTRL_1 0x0331
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -276,13 +250,9 @@
#define REG_SW_MAC_CTRL_2 0x0332
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_MAC_CTRL_3 0x0333
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_MAC_CTRL_4 0x0334
#define SW_PASS_PAUSE BIT(3)
@@ -425,12 +395,8 @@
#define REG_SW_ALU_STAT_CTRL__4 0x041C
-#define ALU_STAT_INDEX_M (BIT(4) - 1)
-#define ALU_STAT_INDEX_S 16
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
-#define ALU_RESV_MCAST_ADDR BIT(1)
-#define ALU_STAT_READ BIT(0)
#define REG_SW_ALU_VAL_A 0x0420
@@ -833,13 +799,6 @@
#define REG_PORT_AVB_SR_1_TYPE 0x0008
#define REG_PORT_AVB_SR_2_TYPE 0x000A
-#define REG_PORT_PME_STATUS 0x0013
-#define REG_PORT_PME_CTRL 0x0017
-
-#define PME_WOL_MAGICPKT BIT(2)
-#define PME_WOL_LINKUP BIT(1)
-#define PME_WOL_ENERGY BIT(0)
-
#define REG_PORT_INT_STATUS 0x001B
#define REG_PORT_INT_MASK 0x001F
@@ -857,7 +816,11 @@
#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
#define PORT_TAIL_TAG_ENABLE BIT(2)
-#define PORT_QUEUE_SPLIT_ENABLE 0x3
+#define PORT_QUEUE_SPLIT_MASK GENMASK(1, 0)
+#define PORT_EIGHT_QUEUE 0x3
+#define PORT_FOUR_QUEUE 0x2
+#define PORT_TWO_QUEUE 0x1
+#define PORT_SINGLE_QUEUE 0x0
#define REG_PORT_CTRL_1 0x0021
@@ -865,8 +828,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1184,35 +1147,16 @@
#define PORT_LINK_STATUS_FAIL BIT(0)
/* 3 - xMII */
-#define REG_PORT_XMII_CTRL_0 0x0300
-
#define PORT_SGMII_SEL BIT(7)
-#define PORT_MII_FULL_DUPLEX BIT(6)
-#define PORT_MII_100MBIT BIT(4)
#define PORT_GRXC_ENABLE BIT(0)
-#define REG_PORT_XMII_CTRL_1 0x0301
-
#define PORT_RMII_CLK_SEL BIT(7)
-/* S1 */
-#define PORT_MII_1000MBIT_S1 BIT(6)
-/* S2 */
-#define PORT_MII_NOT_1GBIT BIT(6)
#define PORT_MII_SEL_EDGE BIT(5)
-#define PORT_RGMII_ID_IG_ENABLE BIT(4)
-#define PORT_RGMII_ID_EG_ENABLE BIT(3)
-#define PORT_MII_MAC_MODE BIT(2)
-#define PORT_MII_SEL_M 0x3
-/* S1 */
-#define PORT_MII_SEL_S1 0x0
-#define PORT_RMII_SEL_S1 0x1
-#define PORT_GMII_SEL_S1 0x2
-#define PORT_RGMII_SEL_S1 0x3
-/* S2 */
-#define PORT_RGMII_SEL 0x0
-#define PORT_RMII_SEL 0x1
-#define PORT_GMII_SEL 0x2
-#define PORT_MII_SEL 0x3
+
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1268,8 +1212,6 @@
/* 5 - MIB Counters */
#define REG_PORT_MIB_CTRL_STAT__4 0x0500
-#define MIB_COUNTER_OVERFLOW BIT(31)
-#define MIB_COUNTER_VALID BIT(30)
#define MIB_COUNTER_READ BIT(25)
#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
@@ -1513,33 +1455,10 @@
/* 9 - Shaping */
-#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
-
-#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-
-#define MTI_PVID_REPLACE BIT(0)
-
-#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
-
-#define MTI_SCHEDULE_MODE_M 0x3
-#define MTI_SCHEDULE_MODE_S 6
-#define MTI_SCHEDULE_STRICT_PRIO 0
-#define MTI_SCHEDULE_WRR 2
-#define MTI_SHAPING_M 0x3
-#define MTI_SHAPING_S 4
-#define MTI_SHAPING_OFF 0
-#define MTI_SHAPING_SRP 1
-#define MTI_SHAPING_TIME_AWARE 2
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
+#define MTI_PVID_REPLACE BIT(0)
-#define MTI_TX_RATIO_M (BIT(7) - 1)
-
-#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
-#define REG_PORT_MTI_HI_WATER_MARK 0x0916
-#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
-#define REG_PORT_MTI_LO_WATER_MARK 0x0918
-#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
/* A - QM */
@@ -1566,6 +1485,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
@@ -1585,10 +1505,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
@@ -1632,11 +1548,7 @@
#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
-#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
#define P_PHY_CTRL REG_PORT_PHY_CTRL
-#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
-#define P_LINK_STATUS REG_PORT_PHY_STATUS
-#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
@@ -1656,10 +1568,4 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
#endif /* KSZ9477_REGS_H */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
deleted file mode 100644
index e3cb0e6c9f6f..000000000000
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ9477 series register access through SPI
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_common.h"
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 5
-
-KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
-
-static int ksz9477_spi_probe(struct spi_device *spi)
-{
- struct regmap_config rc;
- struct ksz_device *dev;
- int i, ret;
-
- dev = ksz_switch_alloc(&spi->dev, spi);
- if (!dev)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- rc = ksz9477_regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz9477_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz9477_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz9477_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz8563" },
- { .compatible = "microchip,ksz9567" },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
-
-static struct spi_driver ksz9477_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
- },
- .probe = ksz9477_spi_probe,
- .remove = ksz9477_spi_remove,
- .shutdown = ksz9477_spi_shutdown,
-};
-
-module_spi_driver(ksz9477_spi_driver);
-
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
new file mode 100644
index 000000000000..ca7830ab168a
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include "ksz9477.h"
+#include "ksz9477_reg.h"
+#include "ksz_common.h"
+
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+#define KSZ9477_MAX_TC 7
+
+/**
+ * ksz9477_flower_parse_key_l2 - Parse Layer 2 key from flow rule and configure
+ * ACL entries accordingly.
+ * @dev: Pointer to the ksz_device.
+ * @port: Port number.
+ * @extack: Pointer to the netlink_ext_ack.
+ * @rule: Pointer to the flow_rule.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function parses the Layer 2 key from the flow rule and configures
+ * the corresponding ACL entries. It checks for unsupported offloads and
+ * available entries before proceeding with the configuration.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_key_l2(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_rule *rule,
+ unsigned long cookie, u32 prio)
+{
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ struct flow_match_eth_addrs ematch;
+ struct ksz9477_acl_entries *acles;
+ int required_entries;
+ u8 *src_mac = NULL;
+ u8 *dst_mac = NULL;
+ u16 ethtype = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ if (match.key->n_proto) {
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "ethernet type mask must be a full mask");
+ return -EINVAL;
+ }
+
+ ethtype = be16_to_cpu(match.key->n_proto);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ flow_rule_match_eth_addrs(rule, &ematch);
+
+ if (!is_zero_ether_addr(ematch.key->src)) {
+ if (!is_broadcast_ether_addr(ematch.mask->src))
+ goto not_full_mask_err;
+
+ src_mac = ematch.key->src;
+ }
+
+ if (!is_zero_ether_addr(ematch.key->dst)) {
+ if (!is_broadcast_ether_addr(ematch.mask->dst))
+ goto not_full_mask_err;
+
+ dst_mac = ematch.key->dst;
+ }
+ }
+
+ acles = &acl->acles;
+ /* ACL supports only one MAC per entry */
+ required_entries = src_mac && dst_mac ? 2 : 1;
+
+ /* Check if there are enough available entries */
+ if (acles->entries_count + required_entries > KSZ9477_ACL_MAX_ENTRIES) {
+ NL_SET_ERR_MSG_MOD(extack, "ACL entry limit reached");
+ return -EOPNOTSUPP;
+ }
+
+ ksz9477_acl_match_process_l2(dev, port, ethtype, src_mac, dst_mac,
+ cookie, prio);
+
+ return 0;
+
+not_full_mask_err:
+ NL_SET_ERR_MSG_MOD(extack, "MAC address mask must be a full mask");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ksz9477_flower_parse_key - Parse flow rule keys for a specified port on a
+ * ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to parse the flow rule keys for.
+ * @extack: The netlink extended ACK for reporting errors.
+ * @rule: The flow_rule to parse.
+ * @cookie: The cookie to associate with the entry.
+ * @prio: The priority of the entry.
+ *
+ * This function checks if the used keys in the flow rule are supported by
+ * the device and parses the L2 keys if they match. If unsupported keys are
+ * used, an error message is set in the extended ACK.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_rule *rule,
+ unsigned long cookie, u32 prio)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule,
+ cookie, prio);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz9477_flower_parse_action - Parse flow rule actions for a specified port
+ * on a ksz_device.
+ * @dev: The ksz_device instance.
+ * @port: The port number to parse the flow rule actions for.
+ * @extack: The netlink extended ACK for reporting errors.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @entry_idx: The index of the ACL entry to store the action.
+ *
+ * This function checks if the actions in the flow rule are supported by
+ * the device. Currently, only actions that change priorities are supported.
+ * If unsupported actions are encountered, an error message is set in the
+ * extended ACK.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int ksz9477_flower_parse_action(struct ksz_device *dev, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ int entry_idx)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct ksz9477_acl_priv *acl = dev->ports[port].acl_priv;
+ const struct flow_action_entry *act;
+ struct ksz9477_acl_entry *entry;
+ bool prio_force = false;
+ u8 prio_val = 0;
+ int i;
+
+ if (TC_H_MIN(cls->classid)) {
+ NL_SET_ERR_MSG_MOD(extack, "hw_tc is not supported. Use: action skbedit prio");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_PRIORITY:
+ if (act->priority > KSZ9477_MAX_TC) {
+ NL_SET_ERR_MSG_MOD(extack, "Priority value is too high");
+ return -EOPNOTSUPP;
+ }
+ prio_force = true;
+ prio_val = act->priority;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* pick entry to store action */
+ entry = &acl->acles.entries[entry_idx];
+
+ ksz9477_acl_action_rule_cfg(entry->entry, prio_force, prio_val);
+ ksz9477_acl_processing_rule_set_action(entry->entry, entry_idx);
+
+ return 0;
+}
+
+/**
+ * ksz9477_cls_flower_add - Add a flow classification rule for a specified port
+ * on a ksz_device.
+ * @ds: The DSA switch instance.
+ * @port: The port number to add the flow classification rule to.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @ingress: A flag indicating if the rule is applied on the ingress path.
+ *
+ * This function adds a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL offloading is supported and parses the flow
+ * keys and actions. If the ACL is not supported, it returns an error. If there
+ * are unprocessed entries, it parses the action for the rule.
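+ *
+ * An illustrative userspace invocation that exercises this path (interface
+ * name and values are examples only):
+ *   tc qdisc add dev lan1 clsact
+ *   tc filter add dev lan1 ingress protocol all flower skip_sw \
+ *           dst_mac 01:23:45:67:89:ab action skbedit priority 3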
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ksz_device *dev = ds->priv;
+ struct ksz9477_acl_priv *acl;
+ int action_entry_idx;
+ int ret;
+
+ acl = dev->ports[port].acl_priv;
+
+ if (!acl) {
+ NL_SET_ERR_MSG_MOD(extack, "ACL offloading is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* A complex rule set can take multiple entries. Use first entry
+ * to store the action.
+ */
+ action_entry_idx = acl->acles.entries_count;
+
+ ret = ksz9477_flower_parse_key(dev, port, extack, rule, cls->cookie,
+ cls->common.prio);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_flower_parse_action(dev, port, extack, cls,
+ action_entry_idx);
+ if (ret)
+ return ret;
+
+ ret = ksz9477_sort_acl_entries(dev, port);
+ if (ret)
+ return ret;
+
+ return ksz9477_acl_write_list(dev, port);
+}
+
+/**
+ * ksz9477_cls_flower_del - Remove a flow classification rule for a specified
+ * port on a ksz_device.
+ * @ds: The DSA switch instance.
+ * @port: The port number to remove the flow classification rule from.
+ * @cls: The flow_cls_offload instance containing the flow rule.
+ * @ingress: A flag indicating if the rule is applied on the ingress path.
+ *
+ * This function removes a flow classification rule for a specified port on a
+ * ksz_device. It checks if the ACL is initialized, and if not, returns an
+ * error. If the ACL is initialized, it removes entries with the specified
+ * cookie and rewrites the ACL list.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int ksz9477_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ unsigned long cookie = cls->cookie;
+ struct ksz_device *dev = ds->priv;
+ struct ksz9477_acl_priv *acl;
+
+ acl = dev->ports[port].acl_priv;
+
+ if (!acl)
+ return -EOPNOTSUPP;
+
+ ksz9477_acl_remove_entries(dev, port, &acl->acles, cookie);
+
+ return ksz9477_acl_write_list(dev, port);
+}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c2968a639eb..0c10351fe5eb 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,10 +2,11 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/delay.h>
+#include <linux/dsa/ksz_common.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -14,31 +15,3103 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
+#include <linux/pinctrl/consumer.h>
#include <net/dsa.h>
+#include <net/ieee8021q.h>
+#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_dcb.h"
+#include "ksz_ptp.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
-void ksz_update_port_member(struct ksz_device *dev, int port)
+#define MIB_COUNTER_NUM 0x20
+
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+struct ksz88xx_stats_raw {
+ u64 rx;
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 tx;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+static const struct ksz_mib_names ksz88xx_mib_names[] = {
+ { 0x00, "rx" },
+ { 0x01, "rx_hi" },
+ { 0x02, "rx_undersize" },
+ { 0x03, "rx_fragments" },
+ { 0x04, "rx_oversize" },
+ { 0x05, "rx_jabbers" },
+ { 0x06, "rx_symbol_err" },
+ { 0x07, "rx_crc_err" },
+ { 0x08, "rx_align_err" },
+ { 0x09, "rx_mac_ctrl" },
+ { 0x0a, "rx_pause" },
+ { 0x0b, "rx_bcast" },
+ { 0x0c, "rx_mcast" },
+ { 0x0d, "rx_ucast" },
+ { 0x0e, "rx_64_or_less" },
+ { 0x0f, "rx_65_127" },
+ { 0x10, "rx_128_255" },
+ { 0x11, "rx_256_511" },
+ { 0x12, "rx_512_1023" },
+ { 0x13, "rx_1024_1522" },
+ { 0x14, "tx" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1a, "tx_ucast" },
+ { 0x1b, "tx_deferred" },
+ { 0x1c, "tx_total_col" },
+ { 0x1d, "tx_exc_col" },
+ { 0x1e, "tx_single_col" },
+ { 0x1f, "tx_mult_col" },
+ { 0x100, "rx_discards" },
+ { 0x101, "tx_discards" },
+};
+
+static const struct ksz_mib_names ksz9477_mib_names[] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+struct ksz_driver_strength_prop {
+ const char *name;
+ int offset;
+ int value;
+};
+
+enum ksz_driver_strength_type {
+ KSZ_DRIVER_STRENGTH_HI,
+ KSZ_DRIVER_STRENGTH_LO,
+ KSZ_DRIVER_STRENGTH_IO,
+};
+
+/**
+ * struct ksz_drive_strength - drive strength mapping
+ * @reg_val: register value
+ * @microamp: microamp value
+ */
+struct ksz_drive_strength {
+ u32 reg_val;
+ u32 microamp;
+};
+
+/* ksz9477_drive_strengths - Drive strength mapping for KSZ9477 variants
+ *
+ * These values are not documented for the KSZ9477 variants, but Microchip
+ * has confirmed that KSZ9477, KSZ9567, KSZ8567, KSZ9897, KSZ9896, KSZ9563,
+ * KSZ9893 and KSZ8563 use the same drive strength register settings as the
+ * KSZ8795.
+ *
+ * The KSZ8795CLX documentation provides more information, with some
+ * recommendations:
+ * - for high speed signals
+ *   1. 4 mA or 8 mA is often used for MII, RMII, and SPI interfaces with a
+ *      2.5V or 3.3V VDDIO.
+ *   2. 12 mA or 16 mA is often used for MII, RMII, and SPI interfaces with
+ *      a 1.8V VDDIO.
+ *   3. 20 mA or 24 mA is often used for GMII/RGMII interfaces with a 2.5V
+ *      or 3.3V VDDIO.
+ *   4. 28 mA is often used for GMII/RGMII interfaces with a 1.8V VDDIO.
+ *   5. For the same interface, heavier loading should use the higher of the
+ *      two drive current strengths.
+ * - for low speed signals
+ *   1. 3.3V VDDIO: use either 4 mA or 8 mA.
+ *   2. 2.5V VDDIO: use either 8 mA or 12 mA.
+ *   3. 1.8V VDDIO: use either 12 mA or 16 mA.
+ *   4. Heavy loading may require a higher drive current strength.
+ */
+static const struct ksz_drive_strength ksz9477_drive_strengths[] = {
+ { SW_DRIVE_STRENGTH_2MA, 2000 },
+ { SW_DRIVE_STRENGTH_4MA, 4000 },
+ { SW_DRIVE_STRENGTH_8MA, 8000 },
+ { SW_DRIVE_STRENGTH_12MA, 12000 },
+ { SW_DRIVE_STRENGTH_16MA, 16000 },
+ { SW_DRIVE_STRENGTH_20MA, 20000 },
+ { SW_DRIVE_STRENGTH_24MA, 24000 },
+ { SW_DRIVE_STRENGTH_28MA, 28000 },
+};
+
+/* ksz88x3_drive_strengths - Drive strength mapping for KSZ8863, KSZ8873, ..
+ * variants.
+ * These values are documented in the KSZ8873 and KSZ8863 datasheets.
+ */
+static const struct ksz_drive_strength ksz88x3_drive_strengths[] = {
+ { 0, 8000 },
+ { KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
+};
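+
+/* A minimal sketch, not part of this patch: how a drive-strength value in
+ * microamps (e.g. taken from a device tree property) could be matched
+ * against one of the tables above. The helper name is hypothetical; only
+ * exact table values are accepted, with no rounding to the nearest step.
+ */
+static int ksz_microamp_to_reg(const struct ksz_drive_strength *array,
+			       size_t size, u32 microamp)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		if (array[i].microamp == microamp)
+			return array[i].reg_val;
+
+	/* No exact match in the table */
+	return -EINVAL;
+}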
+
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface);
+
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
{
- struct ksz_port *p;
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (always succeeds)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
+static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
+ .mac_config = ksz88x3_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct ksz_dev_ops ksz8463_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8463_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8463_r_phy,
+ .w_phy = ksz8463_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+};
+
+static const struct ksz_dev_ops ksz88xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static const struct ksz_dev_ops ksz87xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause);
+
+static struct phylink_pcs *
+ksz_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ struct ksz_port *p = &dev->ports[dp->index];
+
+ if (ksz_is_sgmii_port(dev, dp->index) &&
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX))
+ return p->pcs;
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = ksz_phylink_mac_select_pcs,
+};
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .pme_write8 = ksz_write8,
+ .pme_pread8 = ksz_pread8,
+ .pme_pwrite8 = ksz_pwrite8,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+ .pcs_create = ksz9477_pcs_create,
+};
+
+static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .teardown = lan937x_teardown,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
+ .mdio_bus_preinit = lan937x_mdio_bus_preinit,
+ .create_phy_addr_map = lan937x_create_phy_addr_map,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .setup_rgmii_delay = lan937x_setup_rgmii_delay,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8463_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x10,
+ [REG_IND_CTRL_0] = 0x30,
+ [REG_IND_DATA_8] = 0x26,
+ [REG_IND_DATA_CHECK] = 0x26,
+ [REG_IND_DATA_HI] = 0x28,
+ [REG_IND_DATA_LO] = 0x2C,
+ [REG_IND_MIB_CHECK] = 0x2F,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0xAD,
+ [P_STP_CTRL] = 0x6F,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8463_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(0),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8463_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+ [P_XMII_CTRL_1] = 0x06,
+ [REG_SW_PME_CTRL] = 0x8003,
+ [REG_PORT_PME_STATUS] = 0x8003,
+ [REG_PORT_PME_CTRL] = 0x8007,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(5),
+};
+
+static const u8 ksz8795_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 0,
+ [P_MII_10MBIT] = 1,
+ [P_MII_FULL_DUPLEX] = 0,
+ [P_MII_HALF_DUPLEX] = 1,
+};
+
+static const u8 ksz8795_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 3,
+ [P_GMII_SEL] = 2,
+ [P_RMII_SEL] = 1,
+ [P_MII_SEL] = 0,
+ [P_GMII_1GBIT] = 1,
+ [P_GMII_NOT_1GBIT] = 0,
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
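+
+/* A minimal sketch, not part of this patch: the per-chip masks[] and
+ * shifts[] tables above let shared code pull a field out of a raw table
+ * word without hard-coding any single chip's bit layout. The accessor is
+ * hypothetical and assumes dev->info points at the probed chip's
+ * ksz_chip_data, as set up elsewhere in this driver.
+ */
+static u8 ksz_dyn_mac_fid(struct ksz_device *dev, u32 data)
+{
+	return (data & dev->info->masks[DYNAMIC_MAC_TABLE_FID]) >>
+		dev->info->shifts[DYNAMIC_MAC_FID];
+}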
+
+static const u16 ksz8863_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x70,
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz8895_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x75,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8895_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8895_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 13,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u16 ksz9477_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x0302,
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+ [P_XMII_CTRL_0] = 0x0300,
+ [P_XMII_CTRL_1] = 0x0301,
+ [REG_SW_PME_CTRL] = 0x0006,
+ [REG_PORT_PME_STATUS] = 0x0013,
+ [REG_PORT_PME_CTRL] = 0x0017,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+ [ALU_STAT_DIRECT] = 0,
+ [ALU_RESV_MCAST_ADDR] = BIT(1),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u8 ksz9477_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 1,
+ [P_MII_10MBIT] = 0,
+ [P_MII_FULL_DUPLEX] = 1,
+ [P_MII_HALF_DUPLEX] = 0,
+};
+
+static const u8 ksz9477_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 0,
+ [P_RMII_SEL] = 1,
+ [P_GMII_SEL] = 2,
+ [P_MII_SEL] = 3,
+ [P_GMII_1GBIT] = 0,
+ [P_GMII_NOT_1GBIT] = 1,
+};
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+ [ALU_STAT_DIRECT] = BIT(3),
+ [ALU_RESV_MCAST_ADDR] = BIT(2),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
+
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
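+
+/* A minimal sketch, not part of this patch: an access table like the one
+ * above is normally consumed through the rd_table/wr_table members of
+ * struct regmap_config, so regmap rejects any read or write that falls
+ * outside the yes_ranges before it reaches the bus. The register/value
+ * widths below are illustrative only.
+ */
+static const struct regmap_config ksz8563_sketch_regmap = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.rd_table = &ksz8563_register_set,
+	.wr_table = &ksz8563_register_set,
+};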
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1120, 0x112b),
+ regmap_reg_range(0x1134, 0x113b),
+ regmap_reg_range(0x113c, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2120, 0x212b),
+ regmap_reg_range(0x2134, 0x213b),
+ regmap_reg_range(0x213c, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3120, 0x312b),
+ regmap_reg_range(0x3134, 0x313b),
+ regmap_reg_range(0x313c, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4120, 0x412b),
+ regmap_reg_range(0x4134, 0x413b),
+ regmap_reg_range(0x413c, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5120, 0x512b),
+ regmap_reg_range(0x5134, 0x513b),
+ regmap_reg_range(0x513c, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1120, 0x112b),
+ regmap_reg_range(0x1134, 0x113b),
+ regmap_reg_range(0x113c, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2120, 0x212b),
+ regmap_reg_range(0x2134, 0x213b),
+ regmap_reg_range(0x213c, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3120, 0x312b),
+ regmap_reg_range(0x3134, 0x313b),
+ regmap_reg_range(0x313c, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4120, 0x412b),
+ regmap_reg_range(0x4134, 0x413b),
+ regmap_reg_range(0x413c, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5120, 0x512b),
+ regmap_reg_range(0x5134, 0x513b),
+ regmap_reg_range(0x513c, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6120, 0x612b),
+ regmap_reg_range(0x6134, 0x613b),
+ regmap_reg_range(0x613c, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
+static const struct regmap_range ksz8873_valid_regs[] = {
+ regmap_reg_range(0x00, 0x01),
+ /* global control register */
+ regmap_reg_range(0x02, 0x0f),
+
+ /* port registers */
+ regmap_reg_range(0x10, 0x1d),
+ regmap_reg_range(0x1e, 0x1f),
+ regmap_reg_range(0x20, 0x2d),
+ regmap_reg_range(0x2e, 0x2f),
+ regmap_reg_range(0x30, 0x39),
+ regmap_reg_range(0x3f, 0x3f),
+
+ /* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
+ regmap_reg_range(0x60, 0x6f),
+ regmap_reg_range(0x70, 0x75),
+ regmap_reg_range(0x76, 0x78),
+ regmap_reg_range(0x79, 0x7a),
+ regmap_reg_range(0x7b, 0x83),
+ regmap_reg_range(0x8e, 0x99),
+ regmap_reg_range(0x9a, 0xa5),
+ regmap_reg_range(0xa6, 0xa6),
+ regmap_reg_range(0xa7, 0xaa),
+ regmap_reg_range(0xab, 0xae),
+ regmap_reg_range(0xaf, 0xba),
+ regmap_reg_range(0xbb, 0xbc),
+ regmap_reg_range(0xbd, 0xbd),
+ regmap_reg_range(0xc0, 0xc0),
+ regmap_reg_range(0xc2, 0xc2),
+ regmap_reg_range(0xc3, 0xc3),
+ regmap_reg_range(0xc4, 0xc4),
+ regmap_reg_range(0xc6, 0xc6),
+};
+
+static const struct regmap_access_table ksz8873_register_set = {
+ .yes_ranges = ksz8873_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8873_valid_regs),
+};
+
+const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8463] = {
+ .chip_id = KSZ8463_CHIP_ID,
+ .dev_name = "KSZ8463",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz8463_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8463_regs,
+ .masks = ksz8463_masks,
+ .shifts = ksz8463_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .ptp_capable = true,
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
+ [KSZ8795] = {
+ .chip_id = KSZ8795_CHIP_ID,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8794] = {
+ /* WARNING
+ * =======
+		 * KSZ8794 is similar to KSZ8795, except that the port map
+		 * contains a gap between the external and CPU ports, so
+		 * the port map is NOT contiguous. The per-port register
+ * map is shifted accordingly too, i.e. registers at
+ * offset 0x40 are NOT used on KSZ8794 and they ARE
+ * used on KSZ8795 for external port 3.
+ * external cpu
+ * KSZ8794 0,1,2 4
+ * KSZ8795 0,1,2,3 4
+ * KSZ8765 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8794_CHIP_ID,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, false, false},
+ },
+
+ [KSZ8765] = {
+ .chip_id = KSZ8765_CHIP_ID,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz87xx_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ88X3] = {
+ .chip_id = KSZ88X3_CHIP_ID,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .wr_table = &ksz8873_register_set,
+ .rd_table = &ksz8873_register_set,
+ },
+
+ [KSZ8864] = {
+ /* WARNING
+ * =======
+ * KSZ8864 is similar to KSZ8895, except the first port
+ * does not exist.
+ * external cpu
+ * KSZ8864 1,2,3 4
+ * KSZ8895 0,1,2,3 4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8864_CHIP_ID,
+ .dev_name = "KSZ8864",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {false, true, true, true, false},
+ },
+
+ [KSZ8895] = {
+ .chip_id = KSZ8895_CHIP_ID,
+ .dev_name = "KSZ8895",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz88x3_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ9477] = {
+ .chip_id = KSZ9477_CHIP_ID,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, false},
+ .supports_rmii = {false, false, false, false,
+ false, true, false},
+ .supports_rgmii = {false, false, false, false,
+ false, true, false},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
+ .sgmii_port = 7,
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
+ },
+
+ [KSZ9897] = {
+ .chip_id = KSZ9897_CHIP_ID,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [KSZ9893] = {
+ .chip_id = KSZ9893_CHIP_ID,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
+ [KSZ9563] = {
+ .chip_id = KSZ9563_CHIP_ID,
+ .dev_name = "KSZ9563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ .ptp_capable = true,
+ },
+
+ [KSZ8567] = {
+ .chip_id = KSZ8567_CHIP_ID,
+ .dev_name = "KSZ8567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {false, false, false, false, false,
+ true, true},
+ .ptp_capable = true,
+ },
+
+ [KSZ9567] = {
+ .chip_id = KSZ9567_CHIP_ID,
+ .dev_name = "KSZ9567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9370] = {
+ .chip_id = LAN9370_CHIP_ID,
+ .dev_name = "LAN9370",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
+ .ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
+ },
+
+ [LAN9371] = {
+ .chip_id = LAN9371_CHIP_ID,
+ .dev_name = "LAN9371",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
+ .ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true, true},
+ .supports_rmii = {false, false, false, false, true, true},
+ .supports_rgmii = {false, false, false, false, true, true},
+ .internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
+ },
+
+ [LAN9372] = {
+ .chip_id = LAN9372_CHIP_ID,
+ .dev_name = "LAN9372",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
+ .ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9373] = {
+ .chip_id = LAN9373_CHIP_ID,
+ .dev_name = "LAN9373",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x38, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
+ .ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, false,
+ false, false, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9374] = {
+ .chip_id = LAN9374_CHIP_ID,
+ .dev_name = "LAN9374",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .num_ipms = 8,
+ .tc_cbs_supported = true,
+ .phy_side_mdio_supported = true,
+ .ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ .ptp_capable = true,
+ },
+
+ [LAN9646] = {
+ .chip_id = LAN9646_CHIP_ID,
+ .dev_name = "LAN9646",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .num_tx_queues = 4,
+ .num_ipms = 8,
+ .ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .sgmii_port = 7,
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+};
+EXPORT_SYMBOL_GPL(ksz_switch_chips);
+
+static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (chip->chip_id == prod_num)
+ return chip;
+ }
+
+ return NULL;
+}
+
+static int ksz_check_device_id(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *expected_chip_data;
+ u32 expected_chip_id;
+
+ if (dev->pdata) {
+ expected_chip_id = dev->pdata->chip_id;
+ expected_chip_data = ksz_lookup_info(expected_chip_id);
+ if (WARN_ON(!expected_chip_data))
+ return -ENODEV;
+ } else {
+ expected_chip_data = of_device_get_match_data(dev->dev);
+ expected_chip_id = expected_chip_data->chip_id;
+ }
+
+ if (expected_chip_id != dev->chip_id) {
+ dev_err(dev->dev,
+ "Device tree specifies chip %s but found %s, please fix it!\n",
+ expected_chip_data->dev_name, dev->info->dev_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->info->supports_mii[port])
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+
+ if (dev->info->supports_rmii[port])
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ if (dev->info->internal_phy[port]) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
+}
+
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+ int ret;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+	/* HW counters count bytes plus FCS, which is not acceptable for
+	 * the rtnl_link_stats64 interface.
+	 */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
+}
+
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz88xx_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz88xx_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+	/* HW counters count bytes plus FCS, which is not acceptable for
+	 * the rtnl_link_stats64 interface.
+	 */
+ stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (i == port || i == dev->cpu_port)
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < dev->info->mib_cnt; i++)
+ ethtool_puts(&buf, dev->info->mib_names[i].string);
+}
+
+/**
+ * ksz_update_port_member - Adjust port forwarding rules based on STP state and
+ * isolation settings.
+ * @dev: A pointer to the struct ksz_device representing the device.
+ * @port: The port number to adjust.
+ *
+ * This function dynamically adjusts the port membership configuration for a
+ * specified port and other device ports, based on Spanning Tree Protocol (STP)
+ * states and port isolation settings. Each port, including the CPU port, has a
+ * membership register, represented as a bitfield, where each bit corresponds
+ * to a port number. A set bit indicates permission to forward frames to that
+ * port. This function iterates over all ports, updating the membership register
+ * to reflect current forwarding permissions:
+ *
+ * 1. Forwards frames only to ports that are part of the same bridge group and
+ * in the BR_STATE_FORWARDING state.
+ * 2. Takes into account the isolation status of ports; ports in the
+ * BR_STATE_FORWARDING state with BR_ISOLATED configuration will not forward
+ * frames to each other, even if they are in the same bridge group.
+ * 3. Ensures that the CPU port is included in the membership based on its
+ * upstream port configuration, allowing for management and control traffic
+ * to flow as required.
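+ *
+ * Example (hypothetical configuration): with user ports 1 and 2 in the same
+ * bridge and both in BR_STATE_FORWARDING, and port 5 as the upstream CPU
+ * port, port 1's membership register ends up as BIT(2) | BIT(5).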
+ */
+static void ksz_update_port_member(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 port_member = 0, cpu_port;
+ const struct dsa_port *dp;
+ int i, j;
+
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ dp = dsa_to_port(ds, port);
+ cpu_port = BIT(dsa_upstream_port(ds, port));
+
+ for (i = 0; i < ds->num_ports; i++) {
+ const struct dsa_port *other_dp = dsa_to_port(ds, i);
+ struct ksz_port *other_p = &dev->ports[i];
+ u8 val = 0;
+
+ if (!dsa_is_user_port(ds, i))
continue;
- p = &dev->ports[i];
- if (!(dev->member & (1 << i)))
+ if (port == i)
+ continue;
+ if (!dsa_port_bridge_same(dp, other_dp))
+ continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- /* Port is a member of the bridge and is forwarding. */
+		/* At this point we know that "port" and "other" port [i] are
+		 * in the same bridge group and that "other" port [i] is in
+		 * the forwarding STP state. If "port" is also in the
+		 * forwarding STP state, we can allow forwarding from port
+		 * [port] to port [i], unless both ports are isolated.
+		 */
if (p->stp_state == BR_STATE_FORWARDING &&
- p->member != dev->member)
- dev->dev_ops->cfg_port_member(dev, i, dev->member);
+ !(p->isolated && other_p->isolated)) {
+ val |= BIT(port);
+ port_member |= BIT(i);
+ }
+
+		/* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ third_dp = dsa_to_port(ds, j);
+
+			/* Now update the relation of the "other" port [i] to
+			 * the "third" port [j]. We already know that both
+			 * port [i] and port [j] are in the forwarding STP
+			 * state. We still need to check that they are in the
+			 * same bridge group and not isolated from each other
+			 * before allowing forwarding from port [i] to
+			 * port [j].
+			 */
+ if (dsa_port_bridge_same(other_dp, third_dp) &&
+ !(other_p->isolated && third_p->isolated))
+ val |= BIT(j);
+ }
+
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
+ }
+
+	/* HSR ports are set up once, so use the assigned membership when
+	 * the port is enabled.
+	 */
+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
+ (dev->hsr_ports & BIT(port)))
+ port_member = dev->hsr_ports;
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
+}
+
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
+/**
+ * ksz_parent_mdio_read - Read data from a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to read.
+ *
+ * This function provides a direct read operation on the parent MDIO bus for
+ * accessing PHY registers. By bypassing SPI or I2C, it uses the parent MDIO bus
+ * to retrieve data from the PHY registers at the specified address and register
+ * number.
+ *
+ * Return: Value of the PHY register, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_read_nested(dev->parent_mdio_bus, addr, regnum);
+}
+
+/**
+ * ksz_parent_mdio_write - Write data to a PHY register on the parent MDIO bus.
+ * @bus: MDIO bus structure.
+ * @addr: PHY address on the parent MDIO bus.
+ * @regnum: Register number to write to.
+ * @val: Value to write to the PHY register.
+ *
+ * This function provides a direct write operation on the parent MDIO bus for
+ * accessing PHY registers. Bypassing SPI or I2C, it uses the parent MDIO bus
+ * to modify the PHY register values at the specified address.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parent_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return mdiobus_write_nested(dev->parent_mdio_bus, addr, regnum, val);
+}
+
+/**
+ * ksz_phy_addr_to_port - Map a PHY address to the corresponding switch port.
+ * @dev: Pointer to device structure.
+ * @addr: PHY address to map to a port.
+ *
+ * This function finds the corresponding switch port for a given PHY address by
+ * iterating over all user ports on the device. It checks if a port's PHY
+ * address in `phy_addr_map` matches the specified address and if the port
+ * contains an internal PHY. If a match is found, the index of the port is
+ * returned.
+ *
+ * Return: Port index on success, or -EINVAL if no matching port is found.
+ */
+static int ksz_phy_addr_to_port(struct ksz_device *dev, int addr)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dev->info->internal_phy[dp->index] &&
+ dev->phy_addr_map[dp->index] == addr)
+ return dp->index;
}
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_irq_phy_setup - Configure IRQs for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Sets up IRQs for each active PHY connected to the KSZ switch by mapping the
+ * appropriate IRQs for each PHY and assigning them to the `user_mii_bus` in
+ * the DSA switch structure. Each IRQ is mapped based on the port's IRQ domain.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy, port;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ port = ksz_phy_addr_to_port(dev, phy);
+ if (port < 0) {
+ ret = port;
+ goto out;
+ }
+
+ irq = irq_find_mapping(dev->ports[port].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (!irq) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ds->user_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
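+	/* Unwind: dispose of the IRQ mappings already created for
+	 * lower-numbered PHY addresses before the failure.
+	 */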
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+/**
+ * ksz_irq_phy_free - Release IRQ mappings for PHYs in the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * Releases any IRQ mappings previously assigned to active PHYs in the KSZ
+ * switch by disposing of each mapped IRQ in the `user_mii_bus` structure.
+ */
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
+}
+
+/**
+ * ksz_parse_dt_phy_config - Parse and validate PHY configuration from DT
+ * @dev: pointer to the KSZ device structure
+ * @bus: pointer to the MII bus structure
+ * @mdio_np: pointer to the MDIO node in the device tree
+ *
+ * This function parses and validates PHY configurations for each user port
+ * defined in the device tree for a KSZ switch device. It verifies that the
+ * `phy-handle` properties are correctly set and that the internal PHYs match
+ * expected addresses and parent nodes. Sets up the PHY mask in the MII bus if
+ * all validations pass. Logs error messages for any mismatches or missing data.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_parse_dt_phy_config(struct ksz_device *dev, struct mii_bus *bus,
+ struct device_node *mdio_np)
+{
+ struct device_node *phy_node, *phy_parent_node;
+ bool phys_are_valid = true;
+ struct dsa_port *dp;
+ u32 phy_addr;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ if (!dev->info->internal_phy[dp->index])
+ continue;
+
+ phy_node = of_parse_phandle(dp->dn, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(dev->dev, "failed to parse phy-handle for port %d.\n",
+ dp->index);
+ phys_are_valid = false;
+ continue;
+ }
+
+ phy_parent_node = of_get_parent(phy_node);
+ if (!phy_parent_node) {
+ dev_err(dev->dev, "failed to get PHY-parent node for port %d\n",
+ dp->index);
+ phys_are_valid = false;
+ } else if (phy_parent_node != mdio_np) {
+ dev_err(dev->dev, "PHY-parent node mismatch for port %d, expected %pOF, got %pOF\n",
+ dp->index, mdio_np, phy_parent_node);
+ phys_are_valid = false;
+ } else {
+ ret = of_property_read_u32(phy_node, "reg", &phy_addr);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to read PHY address for port %d. Error %d\n",
+ dp->index, ret);
+ phys_are_valid = false;
+ } else if (phy_addr != dev->phy_addr_map[dp->index]) {
+ dev_err(dev->dev, "PHY address mismatch for port %d, expected 0x%x, got 0x%x\n",
+ dp->index, dev->phy_addr_map[dp->index],
+ phy_addr);
+ phys_are_valid = false;
+ } else {
+ bus->phy_mask |= BIT(phy_addr);
+ }
+ }
+
+ of_node_put(phy_node);
+ of_node_put(phy_parent_node);
+ }
+
+ if (!phys_are_valid)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ksz_mdio_register - Register and configure the MDIO bus for the KSZ device.
+ * @dev: Pointer to the KSZ device structure.
+ *
+ * This function sets up and registers an MDIO bus for the KSZ switch device,
+ * allowing access to its internal PHYs. If the device supports side MDIO,
+ * the function will configure the external MDIO controller specified by the
+ * "mdio-parent-bus" device tree property to directly manage internal PHYs.
+ * Otherwise, SPI or I2C access is set up for PHY access.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct device_node *parent_bus_node;
+ struct mii_bus *parent_bus = NULL;
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret, i;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ parent_bus_node = of_parse_phandle(mdio_np, "mdio-parent-bus", 0);
+ if (parent_bus_node && !dev->info->phy_side_mdio_supported) {
+ dev_err(dev->dev, "Side MDIO bus is not supported for this HW, ignoring 'mdio-parent-bus' property.\n");
+ ret = -EINVAL;
+
+ goto put_mdio_node;
+ } else if (parent_bus_node) {
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (!parent_bus) {
+ ret = -EPROBE_DEFER;
+
+ goto put_mdio_node;
+ }
+
+ dev->parent_mdio_bus = parent_bus;
+ }
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->mdio_bus_preinit) {
+ ret = dev->dev_ops->mdio_bus_preinit(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ }
+
+ if (dev->dev_ops->create_phy_addr_map) {
+ ret = dev->dev_ops->create_phy_addr_map(dev, !!parent_bus);
+ if (ret)
+ goto put_mdio_node;
+ } else {
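+		/* Fall back to an identity map: PHY address equals port
+		 * index.
+		 */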
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
+ }
+
+ bus->priv = dev;
+ if (parent_bus) {
+ bus->read = ksz_parent_mdio_read;
+ bus->write = ksz_parent_mdio_write;
+ bus->name = "KSZ side MDIO";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "ksz-side-mdio-%d",
+ ds->index);
+ } else {
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz user smi";
+		if (ds->dst->index != 0)
+			snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d",
+				 ds->dst->index, ds->index);
+		else
+			snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
+
+ ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
+ if (ret)
+ goto put_mdio_node;
+
+ ds->phys_mii_mask = bus->phy_mask;
+ bus->parent = ds->dev;
+
+ ds->user_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret)
+ goto put_mdio_node;
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+put_mdio_node:
+ of_node_put(mdio_np);
+ of_node_put(parent_bus_node);
+
+ return ret;
+}
+
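+/* The irq_chip callbacks below follow the usual slow-bus pattern: mask and
+ * unmask only update the cached kirq->masked bitmap, while the actual
+ * register write is deferred to ksz_irq_bus_sync_unlock(), which runs in a
+ * context where the underlying SPI/I2C bus may be accessed.
+ */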
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
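+/* Threaded handler for a summary interrupt: read the status register once
+ * and dispatch every set bit to its nested per-source virtual IRQ.
+ */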
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT, kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
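+/* Interrupt topology: the chip-level IRQ (girq) demultiplexes into one
+ * interrupt per port, and each port IRQ (pirq) demultiplexes further into
+ * per-port sources such as the internal PHY interrupt.
+ */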
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (!pirq->irq_num)
+ return -EINVAL;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
+
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 storm_mask, storm_rate;
+ struct dsa_port *dp;
+ struct ksz_port *p;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
+ if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) {
+ ret = dev->dev_ops->pcs_create(dev);
+ if (ret)
+ return ret;
+ }
+
+	/* Set broadcast storm protection to a 10% rate. */
+ storm_mask = BROADCAST_STORM_RATE;
+ storm_rate = (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ if (ksz_is_ksz8463(dev)) {
+ storm_mask = swab16(storm_mask);
+ storm_rate = swab16(storm_rate);
+ }
+ regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
+ storm_mask, storm_rate);
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ ds->num_tx_queues = dev->info->num_tx_queues;
+
+ regmap_update_bits(ksz_regmap_8(dev), regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+ ds->dscp_prio_mapping_is_global = true;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+	/* Start with learning disabled on standalone user ports, and enabled
+	 * on the CPU port. In the absence of finer-grained mechanisms,
+	 * learning on the CPU port avoids flooding bridge-local addresses
+	 * onto the network in some cases.
+	 */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto port_release;
+
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto pirq_release;
+ }
+ }
+ }
+
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto port_release;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ goto out_ptp_clock_unregister;
+ }
+
+ ret = ksz_dcb_init(dev);
+ if (ret)
+ goto out_ptp_clock_unregister;
+
+ /* start switch */
+ regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+
+out_ptp_clock_unregister:
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
+port_release:
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
+pirq_release:
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+ }
+ ksz_irq_free(&dev->girq);
+ }
+
+ return ret;
+}
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
+
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+ }
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
}
-EXPORT_SYMBOL_GPL(ksz_update_port_member);
static void port_r_cnt(struct ksz_device *dev, int port)
{
@@ -46,17 +3119,17 @@ static void port_r_cnt(struct ksz_device *dev, int port)
u64 *dropped;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
@@ -72,7 +3145,7 @@ static void ksz_mib_read_work(struct work_struct *work)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
@@ -86,11 +3159,15 @@ static void ksz_mib_read_work(struct work_struct *work)
if (!p->read) {
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
- if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ if (!netif_carrier_ok(dp->user))
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
p->read = false;
+
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
mutex_unlock(&mib->cnt_mutex);
}
@@ -103,58 +3180,85 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- for (i = 0; i < dev->port_cnt; i++)
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ struct ksz_port_mib *mib = &dev->ports[i].mib;
+
dev->dev_ops->port_init_cnt(dev, i);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
+ }
}
-EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
-EXPORT_SYMBOL_GPL(ksz_phy_read16);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_phy_write16);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
+
+ switch (dev->chip_id) {
+ case KSZ88X3_CHIP_ID:
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
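+		/* DSA port index 0 corresponds to the chip's "Port 1". */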
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ break;
+ }
+
+ return 0;
+}
+
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
/* Read all MIB counters when the link is going down. */
- p->read = true;
+ dev->ports[dp->index].read = true;
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
-EXPORT_SYMBOL_GPL(ksz_mac_link_down);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
if (sset != ETH_SS_STATS)
return 0;
- return dev->mib_cnt;
+ return dev->info->mib_cnt;
}
-EXPORT_SYMBOL_GPL(ksz_sset_count);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
@@ -164,180 +3268,1758 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
mutex_lock(&mib->cnt_mutex);
/* Only read dropped counters if no link. */
- if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ if (!netif_carrier_ok(dp->user))
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
- memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
+ memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
-EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member |= (1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
*/
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member &= ~(1 << port);
- dev->member &= ~(1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-void ksz_port_fast_age(struct dsa_switch *ds, int port)
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data)
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 member;
- struct alu_struct alu;
- do {
- alu.is_static = false;
- ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
- &member, &timestamp,
- &entries);
- if (!ret && (member & BIT(port))) {
- ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->set_ageing_time(dev, msecs);
+}
+
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
+}
+
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
+}
+
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
+}
+
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
+
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
+}
+
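+/* Build the per-port priority-to-queue map: each Internal Priority Map (IPM)
+ * value is assigned a TX queue derived from the IEEE 802.1Q traffic-type to
+ * traffic-class recommendation for the configured queue count, and packed
+ * into its own KSZ9477_PORT_TC_MAP_S wide field of the 32-bit map register.
+ */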
+static int ksz9477_set_default_prio_queue_mapping(struct ksz_device *dev,
+ int port)
+{
+ u32 queue_map = 0;
+ int ipm;
+
+ for (ipm = 0; ipm < dev->info->num_ipms; ipm++) {
+ int queue;
+
+		/* The Traffic Type (TT) corresponds to the Internal Priority
+		 * Map (IPM) in the switch, and the Traffic Class (TC)
+		 * corresponds to the queue.
+		 */
+ queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues);
+ if (queue < 0)
+ return queue;
+
+ queue_map |= queue << (ipm * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
+
+static int ksz_port_setup(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ /* setup user port */
+ dev->dev_ops->port_setup(dev, port, false);
+
+ if (!is_ksz8(dev)) {
+ ret = ksz9477_set_default_prio_queue_mapping(dev, port);
+ if (ret)
+ return ret;
+ }
+
+ /* port_stp_state_set() will be called after to enable the port so
+ * there is no need to do anything.
+ */
+
+ return ksz_dcb_init_port(dev, port);
+}
+
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u16 *regs;
+ u8 data;
+
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ p = &dev->ports[port];
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
+
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
+
+static void ksz_port_teardown(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ if (dsa_is_user_port(ds, port))
+ ksz9477_port_acl_free(dev, port);
+ }
+}
+
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & (BR_LEARNING | BR_ISOLATED)) {
+ if (flags.mask & BR_LEARNING)
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ if (flags.mask & BR_ISOLATED)
+ p->isolated = !!(flags.val & BR_ISOLATED);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ88X3_CHIP_ID ||
+ dev->chip_id == KSZ8463_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ8567_CHIP_ID ||
+ dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID ||
+ dev->chip_id == LAN9646_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X;
+
+ return proto;
+}
+
+static int ksz_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct ksz_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_KSZ8795:
+ return 0;
+ case DSA_TAG_PROTO_KSZ9893:
+ case DSA_TAG_PROTO_KSZ9477:
+ case DSA_TAG_PROTO_LAN937X:
+ tagger_data = ksz_tagger_data(ds);
+ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
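+	/* The maximum MTU is the chip's maximum frame size less the
+	 * VLAN-tagged Ethernet header and the FCS, neither of which counts
+	 * towards the MTU.
+	 */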
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8463_CHIP_ID:
+ case KSZ88X3_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
+ return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * Besides reporting EEE support, this function documents devices where EEE
+ * was initially advertised but later withdrawn due to reliability issues, as
+ * described in official errata documents. These devices are explicitly
+ * listed to record known limitations, even if there is no technical
+ * necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->info->internal_phy[port])
+ return false;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
+ case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
+ case KSZ9477_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
+ case KSZ9567_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
+ case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
+ case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+		 * manually disabled.
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
+ }
+
+ return false;
+}
+
+static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *e)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!e->tx_lpi_enabled) {
+ dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
+ return -EINVAL;
+ }
+
+ if (e->tx_lpi_timer) {
+ dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ksz_set_xmii(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
+ P_RGMII_ID_EG_ENABLE);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= bitval[P_MII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= bitval[P_RMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= bitval[P_GMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ data8 |= bitval[P_RGMII_SEL];
+		/* On KSZ9893-family and LAN937x chips, disable RGMII in-band
+		 * status support
+		 */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID ||
+ is_lan937x(dev))
+ data8 &= ~P_MII_MAC_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ if (p->rgmii_tx_val)
+ data8 |= P_RGMII_ID_EG_ENABLE;
+
+ if (p->rgmii_rx_val)
+ data8 |= P_RGMII_ID_IG_ENABLE;
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
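+/* Decode P_XMII_CTRL_1 back into a phy_interface_t. For RGMII, the
+ * internal-delay bits map as follows:
+ *   egress delay only   -> PHY_INTERFACE_MODE_RGMII_TXID
+ *   ingress delay only  -> PHY_INTERFACE_MODE_RGMII_RXID
+ *   both delays         -> PHY_INTERFACE_MODE_RGMII_ID
+ *   neither             -> PHY_INTERFACE_MODE_RGMII
+ */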
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ phy_interface_t interface;
+ u8 data8;
+ u8 val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_MII_SEL_M, data8);
+
+ if (val == bitval[P_MII_SEL]) {
+ if (gbit)
+ interface = PHY_INTERFACE_MODE_GMII;
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & P_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ }
+
+ return interface;
+}
+
+static void ksz88x3_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+
+ dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN);
+}
+
+static void ksz_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ /* No need to configure XMII control register when using SGMII. */
+ if (ksz_is_sgmii_port(dev, port))
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ ksz_set_xmii(dev, port, state->interface);
+
+ if (dev->dev_ops->setup_rgmii_delay)
+ dev->dev_ops->setup_rgmii_delay(dev, port);
+}
+
+bool ksz_get_gbit(struct ksz_device *dev, int port)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ bool gbit = false;
+ u8 data8;
+ bool val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_GMII_1GBIT_M, data8);
+
+ if (val == bitval[P_GMII_1GBIT])
+ gbit = true;
+
+ return gbit;
+}
+
+static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~P_GMII_1GBIT_M;
+
+ if (gbit)
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
+ else
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
+
+ data8 &= ~P_MII_100MBIT_M;
+
+ if (speed == SPEED_100)
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
+ else
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
+}
+
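+/* Speed selection is split across two registers: the gigabit bit lives in
+ * XMII control register 1, while the 100/10 Mbit selection lives in XMII
+ * control register 0, hence the two helpers above.
+ */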
+static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
+{
+ if (speed == SPEED_1000)
+ ksz_set_gbit(dev, port, true);
+ else
+ ksz_set_gbit(dev, port, false);
+
+ if (speed == SPEED_100 || speed == SPEED_10)
+ ksz_set_100_10mbit(dev, port, speed);
+}
+
+static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ u8 mask;
+ u8 val;
+
+ mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL];
+
+ if (duplex == DUPLEX_FULL)
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
+ else
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
+
+ if (tx_pause)
+ val |= masks[P_MII_TX_FLOW_CTRL];
+
+ if (rx_pause)
+ val |= masks[P_MII_RX_FLOW_CTRL];
+
+ ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
+}
+
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+ struct ksz_port *p;
+
+ p = &dev->ports[port];
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ p->phydev.speed = speed;
+
+ ksz_port_set_xmii_speed(dev, port, speed);
+
+ ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2, id4;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ84_FAMILY_ID:
+ dev->chip_id = KSZ8463_CHIP_ID;
+ break;
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ88X3_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ case KSZ8895_FAMILY_ID:
+ if (id2 == KSZ8895_CHIP_ID_95 ||
+ id2 == KSZ8895_CHIP_ID_95R)
+ dev->chip_id = KSZ8895_CHIP_ID;
+ else
+ return -ENODEV;
+ ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4);
+ if (ret)
+ return ret;
+ if (id4 & SW_KSZ8864)
+ dev->chip_id = KSZ8864_CHIP_ID;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+
+ /* LAN9646 does not have its own chip id. */
+ if (dev->chip_id != LAN9646_CHIP_ID)
+ dev->chip_id = id32;
+ break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
if (ret)
- break;
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else if (id4 == SKU_ID_KSZ9563)
+ dev->chip_id = KSZ9563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
}
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ }
+ return 0;
+}
- return ret;
+static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ return ksz9477_cls_flower_add(ds, port, cls, ingress);
+ }
+
+ return -EOPNOTSUPP;
}
-EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int empty = 0;
- alu.port_forward = 0;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ return ksz9477_cls_flower_del(ds, port, cls, ingress);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Bandwidth is calculated as idle slope / transmission rate. The resulting
+ * fraction is then converted to a 24-bit hexadecimal value using the
+ * successive multiplication method: on every step, the integer part is kept
+ * as the next hex digit and the fractional part is carried forward.
+ */
+static int cinc_cal(s32 idle_slope, s32 send_slope, u32 *bw)
+{
+ u32 cinc = 0;
+ u32 txrate;
+ u32 rate;
+ u8 temp;
+ u8 i;
+
+ txrate = idle_slope - send_slope;
+
+ if (!txrate)
+ return -EINVAL;
+
+ rate = idle_slope;
+
+ /* 24 bit register */
+ for (i = 0; i < 6; i++) {
+ rate = rate * 16;
+
+ temp = rate / txrate;
+
+ rate %= txrate;
+
+ cinc = ((cinc << 4) | temp);
+ }
+
+ *bw = cinc;
+
+ return 0;
+}
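For illustration, here is a standalone sketch of the successive-multiplication conversion performed by cinc_cal() (editorial example, not part of the patch; the slope values are made up):

#include <stdio.h>

/* Same algorithm as cinc_cal() above, extracted for demonstration. */
static unsigned int cinc_demo(int idle_slope, int send_slope)
{
	unsigned int txrate = idle_slope - send_slope;
	unsigned int rate = idle_slope;
	unsigned int cinc = 0;
	int i;

	for (i = 0; i < 6; i++) {	/* six hex digits -> a 24-bit value */
		rate *= 16;
		cinc = (cinc << 4) | (rate / txrate);
		rate %= txrate;
	}

	return cinc;
}

int main(void)
{
	/* An idle slope of 30000 kbit/s with a send slope of -70000 kbit/s
	 * reserves 30000 / 100000 = 0.3 of the link, which encodes as
	 * 0x4CCCCC (0.3 * 16^6, truncated).
	 */
	printf("cinc = 0x%06X\n", cinc_demo(30000, -70000));
	return 0;
}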
+
+static int ksz_setup_tc_mode(struct ksz_device *dev, int port, u8 scheduler,
+ u8 shaper)
+{
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ FIELD_PREP(MTI_SCHEDULE_MODE_M, scheduler) |
+ FIELD_PREP(MTI_SHAPING_M, shaper));
+}
+
+static int ksz_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+ u32 bw;
+
+ if (!dev->info->tc_cbs_supported)
+ return -EOPNOTSUPP;
+
+ if (qopt->queue > dev->info->num_tx_queues)
+ return -EINVAL;
+
+ /* Queue Selection */
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue);
+ if (ret)
+ return ret;
+
+ if (!qopt->enable)
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
+ MTI_SHAPING_OFF);
+
+ /* High Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK,
+ qopt->hicredit);
+ if (ret)
+ return ret;
+
+ /* Low Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK,
+ qopt->locredit);
+ if (ret)
+ return ret;
+
+ /* Credit Increment Register */
+ ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw);
+ if (ret)
+ return ret;
+
+ if (dev->dev_ops->tc_cbs_set_cinc) {
+ ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
+ MTI_SHAPING_SRP);
+}
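A hedged usage sketch for the CBS offload path above; the interface name and credit values are illustrative, and an mqprio root qdisc with handle 100: is assumed to already map traffic classes to hardware queues:

/*	tc qdisc replace dev lan1 parent 100:2 cbs offload 1 \
 *		idleslope 30000 sendslope -70000 hicredit 30 locredit -1470
 */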
+
+static int ksz_disable_egress_rate_limit(struct ksz_device *dev, int port)
+{
+ int queue, ret;
+
+ /* Configuration will not take effect until the last Port Queue X
+ * Egress Limit Control Register is written.
+ */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ ret = ksz_pwrite8(dev, port, KSZ9477_REG_PORT_OUT_RATE_0 + queue,
+ KSZ9477_OUT_RATE_NO_LIMIT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
+ int band)
+{
+ /* Bands and queues prioritize packets in opposite orders: in strict
+ * priority mode, Queue 0 carries the lowest priority while Band 0
+ * carries the highest. For example, with four bands, band 0 maps to
+ * queue 3 and band 3 to queue 0.
+ */
+ return p->bands - 1 - band;
+}
+
+static u8 ksz8463_tc_ctrl(int port, int queue)
+{
+ u8 reg;
+
+ reg = 0xC8 + port * 4;
+ reg += ((3 - queue) / 2) * 2;
+ reg++;
+ reg -= (queue & 1);
+ return reg;
+}
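A worked expansion of the arithmetic above (editorial note; the addresses follow purely from the expression, not from a datasheet):

/* reg = 0xC8 + port * 4 + ((3 - queue) / 2) * 2 + 1 - (queue & 1), so each
 * port owns four consecutive registers and the queues map to them in
 * descending order:
 *
 *	port 0: q3 -> 0xC8, q2 -> 0xC9, q1 -> 0xCA, q0 -> 0xCB
 *	port 1: q3 -> 0xCC, q2 -> 0xCD, q1 -> 0xCE, q0 -> 0xCF
 */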
+
+/**
+ * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to configure
+ * @p: Pointer to offload replace parameters describing ETS bands and mapping
+ *
+ * The KSZ88x3 supports two scheduling modes: Strict Priority and
+ * Weighted Fair Queuing (WFQ). Both modes have fixed behavior:
+ * - No configurable queue-to-priority mapping
+ * - No weight adjustment in WFQ mode
+ *
+ * This function configures the switch to use strict priority mode by
+ * clearing the WFQ enable bit for all queues associated with ETS bands.
+ * If strict priority is not explicitly requested, the switch will default
+ * to WFQ mode.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band;
+
+ /* Only strict priority mode is supported for now.
+ * WFQ is implicitly enabled when strict mode is disabled.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Clear WFQ enable bit to select strict priority scheduling */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to reset
+ *
+ * The KSZ88x3 supports only fixed scheduling modes: Strict Priority or
+ * Weighted Fair Queuing (WFQ), with no reconfiguration of weights or
+ * queue mapping. This function resets the port’s scheduling mode to
+ * the default, which is WFQ, by enabling the WFQ bit for all queues.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* Iterate over all transmit queues for this port */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+ if (ksz_is_ksz8463(dev))
+ reg = ksz8463_tc_ctrl(port, queue);
+
+ /* Set WFQ enable bit to revert back to default scheduling
+ * mode
+ */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE,
+ KSZ8873_TXQ_WFQ_ENABLE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
+{
+ int ret;
+
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
+ if (ret)
+ return ret;
+
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
+ MTI_SHAPING_OFF);
+}
+
+static int ksz_queue_set_wrr(struct ksz_device *dev, int port, int queue,
+ int weight)
+{
+ int ret;
+
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
+ if (ret)
+ return ret;
+
+ ret = ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
+ MTI_SHAPING_OFF);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, KSZ9477_PORT_MTI_QUEUE_CTRL_1, weight);
+}
+
+static int ksz_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band, tc_prio;
+ u32 queue_map = 0;
+
+ /* In order to ensure proper prioritization, it is necessary to set the
+ * rate limit for the related queue to zero. Otherwise strict priority
+ * or WRR mode will not work. This is a hardware limitation.
+ */
+ ret = ksz_disable_egress_rate_limit(dev, port);
+ if (ret)
+ return ret;
+
+ /* Configure queue scheduling mode for all bands. Currently only strict
+ * prio mode is supported.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+
+ ret = ksz_queue_set_strict(dev, port, queue);
+ if (ret)
+ return ret;
+ }
+
+ /* Configure the mapping between traffic classes and queues. Note: the
+ * priomap variable supports 16 traffic classes, but the chip can
+ * handle only 8 classes.
+ */
+ for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
+ int queue;
+
+ if (tc_prio >= dev->info->num_ipms)
+ break;
+
+ queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
+ queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
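An illustrative expansion of the mapping loop above, assuming KSZ9477_PORT_TC_MAP_S selects one field per traffic class and using a made-up priomap:

/* With 4 bands and priomap = {3, 3, 2, 2, 1, 1, 0, 0}:
 *	tc_prio 0, 1: band 3 -> queue 0
 *	tc_prio 2, 3: band 2 -> queue 1
 *	tc_prio 4, 5: band 1 -> queue 2
 *	tc_prio 6, 7: band 0 -> queue 3
 * queue_map thus packs one queue number per traffic class, with the lowest
 * class in the least significant field.
 */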
+
+static int ksz_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* To restore the default chip configuration, set all queues to use the
+ * WRR scheduler with a weight of 1.
+ */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ ret = ksz_queue_set_wrr(dev, port, queue,
+ KSZ9477_DEFAULT_WRR_WEIGHT);
+
+ if (ret)
+ return ret;
+ }
+
+ /* Revert the queue mapping for TC-priority to its default setting on
+ * the chip.
+ */
+ return ksz9477_set_default_prio_queue_mapping(dev, port);
+}
+
+static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int band;
+
+ /* Since it is not feasible to share one port among multiple qdiscs,
+ * the user must configure all available queues appropriately.
+ */
+ if (p->bands != dev->info->num_tx_queues) {
+ dev_err(dev->dev, "Not supported amount of bands. It should be %d\n",
+ dev->info->num_tx_queues);
+ return -EOPNOTSUPP;
+ }
+
+ for (band = 0; band < p->bands; ++band) {
+ /* The KSZ switches utilize a weighted round robin configuration
+ * where a certain number of packets can be transmitted from a
+ * queue before the next queue is serviced. For more information
+ * on this, refer to section 5.2.8.4 of the KSZ8565R
+ * documentation on the Port Transmit Queue Control 1 Register.
+ * However, the current ETS Qdisc implementation (as of February
+ * 2023) assigns a weight to each queue based on the number of
+ * bytes or extrapolated bandwidth in percentages. Since this
+ * differs from the KSZ switches' method and we don't want to
+ * fake support by converting bytes to packets, it is better to
+ * return an error instead.
+ */
+ if (p->quanta[band]) {
+ dev_err(dev->dev, "Quanta/weights configuration is not supported.\n");
+ return -EOPNOTSUPP;
}
}
- /* no available entry */
- if (index == dev->num_statics && !empty)
- return -ENOSPC;
+ return 0;
+}
+
+static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ if (is_ksz8(dev) && !(ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)))
+ return -EOPNOTSUPP;
+
+ if (qopt->parent != TC_H_ROOT) {
+ dev_err(dev->dev, "Parent should be \"root\"\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params);
+ if (ret)
+ return ret;
+
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_add(dev, port,
+ &qopt->replace_params);
+ else
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ case TC_ETS_DESTROY:
+ if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev))
+ return ksz88x3_tc_ets_del(dev, port);
+ else
+ return ksz_tc_ets_del(dev, port);
+ case TC_ETS_STATS:
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
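A hedged usage sketch for the ETS path above; the interface name is illustrative, and four bands are assumed to match num_tx_queues (all strict, no quanta, as the validation requires):

/*	tc qdisc replace dev lan1 root handle 1: ets strict 4 \
 *		priomap 3 3 2 2 1 1 0 0
 */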
+
+static int ksz_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return ksz_setup_tc_cbs(ds, port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return ksz_tc_setup_qdisc_ets(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ksz_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason which could be due to a
+ * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int ksz_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_status;
+ int ret;
+
+ ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS],
+ &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
+ pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS],
+ pme_status);
+}
+
+/**
+ * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function checks the device PME wakeup_source flag and chip_id.
+ * If enabled and supported, it sets the supported and active WoL
+ * flags.
+ */
+static void ksz_get_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ u8 pme_ctrl;
+ int ret;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ /* Check if the current MAC address on this port can be set
+ * as global for WAKE_MAGIC support. The result may vary
+ * dynamically based on the other ports' configurations.
+ */
+ if (ksz_is_port_mac_global_usable(dev->ds, port))
+ wol->supported |= WAKE_MAGIC;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl);
+ if (ret)
+ return;
+
+ if (pme_ctrl & PME_WOL_MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
+}
+
+/**
+ * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified
+ * port. It validates the provided WoL options, checks if PME is
+ * enabled and supported, clears any previous wake reasons, and sets
+ * the Magic Packet flag in the port's PME control register if
+ * specified.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
+static int ksz_set_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ u8 pme_ctrl = 0, pme_ctrl_old = 0;
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ bool magic_switched_off;
+ bool magic_switched_on;
+ int ret;
+
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return -EOPNOTSUPP;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pme_ctrl |= PME_WOL_MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl_old);
+ if (ret)
+ return ret;
+
+ if (pme_ctrl_old == pme_ctrl)
+ return 0;
+
+ magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ !(pme_ctrl & PME_WOL_MAGICPKT);
+ magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ (pme_ctrl & PME_WOL_MAGICPKT);
+
+ /* To keep reference count of MAC address, we should do this
+ * operation only on change of WOL settings.
+ */
+ if (magic_switched_on) {
+ ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
+ if (ret)
+ return ret;
+ } else if (magic_switched_off) {
+ ksz_switch_macaddr_put(dev->ds);
+ }
+
+ ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL],
+ pme_ctrl);
+ if (ret) {
+ if (magic_switched_on)
+ ksz_switch_macaddr_put(dev->ds);
+ return ret;
+ }
+
+ return 0;
+}
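A hedged usage sketch for the WoL callbacks above (the interface name is illustrative):

/*	ethtool -s lan1 wol pg		# magic packet + PHY events
 *	ethtool lan1			# read back supported/active modes
 */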
+
+/**
+ * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
+ * considering Wake-on-LAN (WoL) settings.
+ * @dev: The switch device structure.
+ * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
+ * enabled on any port.
+ *
+ * This function prepares the switch device for a safe shutdown while taking
+ * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
+ * the wol_enabled flag accordingly to reflect whether WoL is active on any
+ * port.
+ */
+static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_pin_en = PME_ENABLE;
+ struct dsa_port *dp;
+ int ret;
+
+ *wol_enabled = false;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ u8 pme_ctrl = 0;
+
+ ret = ops->pme_pread8(dev, dp->index,
+ regs[REG_PORT_PME_CTRL], &pme_ctrl);
+ if (!ret && pme_ctrl)
+ *wol_enabled = true;
+
+ /* make sure there are no pending wake events which would
+ * prevent the device from going to sleep/shutdown.
+ */
+ ksz_handle_wake_reason(dev, dp->index);
+ }
+
+ /* Now it is safe to enable the PME pin. */
+ if (*wol_enabled) {
+ if (dev->pme_active_high)
+ pme_pin_en |= PME_POLARITY;
+ ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en);
+ if (ksz_is_ksz87xx(dev))
+ ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK);
+ }
+}
+
+static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
+ const unsigned char *addr)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ethtool_wolinfo wol;
- /* add entry */
- if (index == dev->num_statics) {
- index = empty - 1;
- memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
- alu.is_static = true;
+ if (dp->hsr_dev) {
+ dev_err(ds->dev,
+ "Cannot change MAC address on port %d with active HSR offload\n",
+ port);
+ return -EBUSY;
}
- alu.port_forward |= BIT(port);
- if (mdb->vid) {
- alu.is_use_fid = true;
- /* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
+ /* Initialize the variable, as the code that fills in the settings may
+ * not be executed.
+ */
+ wol.wolopts = 0;
+
+ ksz_get_wol(ds, dp->index, &wol);
+ if (wol.wolopts & WAKE_MAGIC) {
+ dev_err(ds->dev,
+ "Cannot change MAC address on port %d with active Wake on Magic Packet\n",
+ port);
+ return -EBUSY;
}
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+/**
+ * ksz_is_port_mac_global_usable - Check if the MAC address on a given port
+ * can be used as a global address.
+ * @ds: Pointer to the DSA switch structure.
+ * @port: The port number on which the MAC address is to be checked.
+ *
+ * This function examines the MAC address set on the specified port and
+ * determines if it can be used as a global address for the switch.
+ *
+ * Return: true if the port's MAC address can be used as a global address, false
+ * otherwise.
+ */
+bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port)
{
+ struct net_device *user = dsa_to_port(ds, port)->user;
+ const unsigned char *addr = user->dev_addr;
+ struct ksz_switch_macaddr *switch_macaddr;
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int ret = 0;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
+ ASSERT_RTNL();
+
+ switch_macaddr = dev->switch_macaddr;
+ if (switch_macaddr && !ether_addr_equal(switch_macaddr->addr, addr))
+ return false;
+
+ return true;
+}
+
+/**
+ * ksz_switch_macaddr_get - Program the switch's MAC address register.
+ * @ds: DSA switch instance.
+ * @port: Port number.
+ * @extack: Netlink extended acknowledgment.
+ *
+ * This function programs the switch's MAC address register with the MAC address
+ * of the requesting user port. This single address is used by the switch for
+ * multiple features like HSR self-address filtering and WoL. Other user ports
+ * can share ownership of this address as long as their MAC address is the same.
+ * The MAC addresses of user ports must not change while they have ownership of
+ * the switch MAC address.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
+int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *user = dsa_to_port(ds, port)->user;
+ const unsigned char *addr = user->dev_addr;
+ struct ksz_switch_macaddr *switch_macaddr;
+ struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ int i, ret;
+
+ /* Make sure concurrent MAC address changes are blocked */
+ ASSERT_RTNL();
+
+ switch_macaddr = dev->switch_macaddr;
+ if (switch_macaddr) {
+ if (!ether_addr_equal(switch_macaddr->addr, addr)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Switch already configured for MAC address %pM",
+ switch_macaddr->addr);
+ return -EBUSY;
}
+
+ refcount_inc(&switch_macaddr->refcount);
+ return 0;
}
- /* no available entry */
- if (index == dev->num_statics)
- goto exit;
+ switch_macaddr = kzalloc(sizeof(*switch_macaddr), GFP_KERNEL);
+ if (!switch_macaddr)
+ return -ENOMEM;
+
+ ether_addr_copy(switch_macaddr->addr, addr);
+ refcount_set(&switch_macaddr->refcount, 1);
+ dev->switch_macaddr = switch_macaddr;
+
+ /* Program the switch MAC address to hardware */
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (ksz_is_ksz8463(dev)) {
+ u16 addr16 = ((u16)addr[i] << 8) | addr[i + 1];
- /* clear port */
- alu.port_forward &= ~BIT(port);
- if (!alu.port_forward)
- alu.is_static = false;
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ ret = ksz_write16(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr16);
+ i++;
+ } else {
+ ret = ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i,
+ addr[i]);
+ }
+ if (ret)
+ goto macaddr_drop;
+ }
+
+ return 0;
+
+macaddr_drop:
+ dev->switch_macaddr = NULL;
+ refcount_set(&switch_macaddr->refcount, 0);
+ kfree(switch_macaddr);
-exit:
return ret;
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+void ksz_switch_macaddr_put(struct dsa_switch *ds)
{
+ struct ksz_switch_macaddr *switch_macaddr;
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ int i;
- if (!dsa_is_user_port(ds, port))
- return 0;
+ /* Make sure concurrent MAC address changes are blocked */
+ ASSERT_RTNL();
- /* setup slave port */
- dev->dev_ops->port_setup(dev, port, false);
+ switch_macaddr = dev->switch_macaddr;
+ if (!refcount_dec_and_test(&switch_macaddr->refcount))
+ return;
- /* port_stp_state_set() will be called after to enable the port so
- * there is no need to do anything.
+ for (i = 0; i < ETH_ALEN; i++)
+ ksz_write8(dev, regs[REG_SW_MAC_ADDR] + i, 0);
+
+ dev->switch_macaddr = NULL;
+ kfree(switch_macaddr);
+}
+
+static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ enum hsr_version ver;
+ int ret;
+
+ ret = hsr_get_version(hsr, &ver);
+ if (ret)
+ return ret;
+
+ if (dev->chip_id != KSZ9477_CHIP_ID) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support HSR offload");
+ return -EOPNOTSUPP;
+ }
+
+ /* KSZ9477 can support HW offloading of only 1 HSR device */
+ if (dev->hsr_dev && hsr != dev->hsr_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload supported for a single HSR");
+ return -EOPNOTSUPP;
+ }
+
+ /* KSZ9477 only supports HSR v0 and v1 */
+ if (!(ver == HSR_V0 || ver == HSR_V1)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only HSR v0 and v1 supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* KSZ9477 can only perform HSR offloading for up to two ports */
+ if (hweight8(dev->hsr_ports) >= 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than two ports - using software HSR");
+ return -EOPNOTSUPP;
+ }
+
+ /* Self MAC address filtering, to avoid frames traversing
+ * the HSR ring more than once.
*/
+ ret = ksz_switch_macaddr_get(ds, port, extack);
+ if (ret)
+ return ret;
+
+ ksz9477_hsr_join(ds, port, hsr);
+ dev->hsr_dev = hsr;
+ dev->hsr_ports |= BIT(port);
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_enable_port);
+
+static int ksz_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct ksz_device *dev = ds->priv;
+
+ WARN_ON(dev->chip_id != KSZ9477_CHIP_ID);
+
+ ksz9477_hsr_leave(ds, port, hsr);
+ dev->hsr_ports &= ~BIT(port);
+ if (!dev->hsr_ports)
+ dev->hsr_dev = NULL;
+
+ ksz_switch_macaddr_put(ds);
+
+ return 0;
+}
+
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .connect_tag_protocol = ksz_connect_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .teardown = ksz_teardown,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .port_setup = ksz_port_setup,
+ .set_ageing_time = ksz_set_ageing_time,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_hsr_join = ksz_hsr_join,
+ .port_hsr_leave = ksz_hsr_leave,
+ .port_set_mac_address = ksz_port_set_mac_address,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_teardown = ksz_port_teardown,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+ .get_wol = ksz_get_wol,
+ .set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
+ .get_ts_info = ksz_get_ts_info,
+ .port_hwtstamp_get = ksz_hwtstamp_get,
+ .port_hwtstamp_set = ksz_hwtstamp_set,
+ .port_txtstamp = ksz_port_txtstamp,
+ .port_rxtstamp = ksz_port_rxtstamp,
+ .cls_flower_add = ksz_cls_flower_add,
+ .cls_flower_del = ksz_cls_flower_del,
+ .port_setup_tc = ksz_setup_tc,
+ .support_eee = ksz_support_eee,
+ .set_mac_eee = ksz_set_mac_eee,
+ .port_get_default_prio = ksz_port_get_default_prio,
+ .port_set_default_prio = ksz_port_set_default_prio,
+ .port_get_dscp_prio = ksz_port_get_dscp_prio,
+ .port_add_dscp_prio = ksz_port_add_dscp_prio,
+ .port_del_dscp_prio = ksz_port_del_dscp_prio,
+ .port_get_apptrust = ksz_port_get_apptrust,
+ .port_set_apptrust = ksz_port_set_apptrust,
+};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
@@ -350,6 +5032,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
@@ -365,16 +5048,347 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops)
+/**
+ * ksz_switch_shutdown - Shutdown routine for the switch device.
+ * @dev: The switch device structure.
+ *
+ * This function is responsible for initiating a shutdown sequence for the
+ * switch device. It invokes the reset operation defined in the device
+ * operations, if available, to reset the switch. Subsequently, it calls the
+ * DSA framework's shutdown function to ensure a proper shutdown of the DSA
+ * switch.
+ */
+void ksz_switch_shutdown(struct ksz_device *dev)
{
- struct device_node *port, *ports;
+ bool wol_enabled = false;
+
+ ksz_wol_pre_shutdown(dev, &wol_enabled);
+
+ if (dev->dev_ops->reset && !wol_enabled)
+ dev->dev_ops->reset(dev);
+
+ dsa_switch_shutdown(dev->ds);
+}
+EXPORT_SYMBOL(ksz_switch_shutdown);
+
+static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = dev->ports[port_num].interface;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1) {
+ dev_warn(dev->dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port_num);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ dev->ports[port_num].rgmii_rx_val = rx_delay;
+ dev->ports[port_num].rgmii_tx_val = tx_delay;
+}
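An illustrative device-tree fragment exercising the parsing above; the node follows the standard DSA port binding, and the 2000 ps values mirror the legacy fallback:

/*	port@4 {
 *		reg = <4>;
 *		phy-mode = "rgmii-id";
 *		rx-internal-delay-ps = <2000>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 */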
+
+/**
+ * ksz_drive_strength_to_reg() - Convert drive strength value to corresponding
+ * register value.
+ * @array: The array of drive strength values to search.
+ * @array_size: The size of the array.
+ * @microamp: The drive strength value in microamp to be converted.
+ *
+ * This function searches the array of drive strength values for the given
+ * microamp value and returns the corresponding register value for that drive.
+ *
+ * Return: If found, the corresponding register value for that drive strength
+ * is returned. Otherwise, -EINVAL is returned, indicating an invalid value.
+ */
+static int ksz_drive_strength_to_reg(const struct ksz_drive_strength *array,
+ size_t array_size, int microamp)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++) {
+ if (array[i].microamp == microamp)
+ return array[i].reg_val;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_drive_strength_error() - Report invalid drive strength value
+ * @dev: ksz device
+ * @array: The array of drive strength values to search.
+ * @array_size: The size of the array.
+ * @microamp: Invalid drive strength value in microamp
+ *
+ * This function logs an error message when an unsupported drive strength value
+ * is detected. It lists out all the supported drive strength values for
+ * reference in the error message.
+ */
+static void ksz_drive_strength_error(struct ksz_device *dev,
+ const struct ksz_drive_strength *array,
+ size_t array_size, int microamp)
+{
+ char supported_values[100];
+ size_t remaining_size;
+ int added_len;
+ char *ptr;
+ int i;
+
+ remaining_size = sizeof(supported_values);
+ ptr = supported_values;
+
+ for (i = 0; i < array_size; i++) {
+ added_len = snprintf(ptr, remaining_size,
+ i == 0 ? "%d" : ", %d", array[i].microamp);
+
+ if (added_len >= remaining_size)
+ break;
+
+ ptr += added_len;
+ remaining_size -= added_len;
+ }
+
+ dev_err(dev->dev, "Invalid drive strength %d, supported values are %s\n",
+ microamp, supported_values);
+}
+
+/**
+ * ksz9477_drive_strength_write() - Set the drive strength for specific KSZ9477
+ * chip variants.
+ * @dev: ksz device
+ * @props: Array of drive strength properties to be applied
+ * @num_props: Number of properties in the array
+ *
+ * This function configures the drive strength for various KSZ9477 chip variants
+ * based on the provided properties. It handles chip-specific nuances and
+ * ensures only valid drive strengths are written to the respective chip.
+ *
+ * Return: 0 on successful configuration, a negative error code on failure.
+ */
+static int ksz9477_drive_strength_write(struct ksz_device *dev,
+ struct ksz_driver_strength_prop *props,
+ int num_props)
+{
+ size_t array_size = ARRAY_SIZE(ksz9477_drive_strengths);
+ int i, ret, reg;
+ u8 mask = 0;
+ u8 val = 0;
+
+ if (props[KSZ_DRIVER_STRENGTH_IO].value != -1)
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ props[KSZ_DRIVER_STRENGTH_IO].name);
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ reg = KSZ8795_REG_SW_CTRL_20;
+ else
+ reg = KSZ9477_REG_SW_IO_STRENGTH;
+
+ for (i = 0; i < num_props; i++) {
+ if (props[i].value == -1)
+ continue;
+
+ ret = ksz_drive_strength_to_reg(ksz9477_drive_strengths,
+ array_size, props[i].value);
+ if (ret < 0) {
+ ksz_drive_strength_error(dev, ksz9477_drive_strengths,
+ array_size, props[i].value);
+ return ret;
+ }
+
+ mask |= SW_DRIVE_STRENGTH_M << props[i].offset;
+ val |= ret << props[i].offset;
+ }
+
+ return ksz_rmw8(dev, reg, mask, val);
+}
+
+/**
+ * ksz88x3_drive_strength_write() - Set the drive strength configuration for
+ * KSZ8863 compatible chip variants.
+ * @dev: ksz device
+ * @props: Array of drive strength properties to be set
+ * @num_props: Number of properties in the array
+ *
+ * This function applies the specified drive strength settings to KSZ88X3 chip
+ * variants (KSZ8873, KSZ8863).
+ * It ensures the configurations align with what the chip variant supports and
+ * warns or errors out on unsupported settings.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int ksz88x3_drive_strength_write(struct ksz_device *dev,
+ struct ksz_driver_strength_prop *props,
+ int num_props)
+{
+ size_t array_size = ARRAY_SIZE(ksz88x3_drive_strengths);
+ int microamp;
+ int i, ret;
+
+ for (i = 0; i < num_props; i++) {
+ if (props[i].value == -1 || i == KSZ_DRIVER_STRENGTH_IO)
+ continue;
+
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ props[i].name);
+ }
+
+ microamp = props[KSZ_DRIVER_STRENGTH_IO].value;
+ ret = ksz_drive_strength_to_reg(ksz88x3_drive_strengths, array_size,
+ microamp);
+ if (ret < 0) {
+ ksz_drive_strength_error(dev, ksz88x3_drive_strengths,
+ array_size, microamp);
+ return ret;
+ }
+
+ return ksz_rmw8(dev, KSZ8873_REG_GLOBAL_CTRL_12,
+ KSZ8873_DRIVE_STRENGTH_16MA, ret);
+}
+
+/**
+ * ksz_parse_drive_strength() - Extract and apply drive strength configurations
+ * from device tree properties.
+ * @dev: ksz device
+ *
+ * This function reads the specified drive strength properties from the
+ * device tree, validates against the supported chip variants, and sets
+ * them accordingly. Any error here is treated as critical, since the drive
+ * strength settings are crucial for EMI compliance.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int ksz_parse_drive_strength(struct ksz_device *dev)
+{
+ struct ksz_driver_strength_prop of_props[] = {
+ [KSZ_DRIVER_STRENGTH_HI] = {
+ .name = "microchip,hi-drive-strength-microamp",
+ .offset = SW_HI_SPEED_DRIVE_STRENGTH_S,
+ .value = -1,
+ },
+ [KSZ_DRIVER_STRENGTH_LO] = {
+ .name = "microchip,lo-drive-strength-microamp",
+ .offset = SW_LO_SPEED_DRIVE_STRENGTH_S,
+ .value = -1,
+ },
+ [KSZ_DRIVER_STRENGTH_IO] = {
+ .name = "microchip,io-drive-strength-microamp",
+ .offset = 0, /* don't care */
+ .value = -1,
+ },
+ };
+ struct device_node *np = dev->dev->of_node;
+ bool have_any_prop = false;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(of_props); i++) {
+ ret = of_property_read_u32(np, of_props[i].name,
+ &of_props[i].value);
+ if (ret && ret != -EINVAL)
+ dev_warn(dev->dev, "Failed to read %s\n",
+ of_props[i].name);
+ if (ret)
+ continue;
+
+ have_any_prop = true;
+ }
+
+ if (!have_any_prop)
+ return 0;
+
+ switch (dev->chip_id) {
+ case KSZ88X3_CHIP_ID:
+ return ksz88x3_drive_strength_write(dev, of_props,
+ ARRAY_SIZE(of_props));
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9646_CHIP_ID:
+ return ksz9477_drive_strength_write(dev, of_props,
+ ARRAY_SIZE(of_props));
+ default:
+ for (i = 0; i < ARRAY_SIZE(of_props); i++) {
+ if (of_props[i].value == -1)
+ continue;
+
+ dev_warn(dev->dev, "%s is not supported by this chip variant\n",
+ of_props[i].name);
+ }
+ }
+
+ return 0;
+}
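An illustrative device-tree fragment for the properties parsed above; the microamp values are placeholders, since valid values depend on the chip's drive-strength table:

/*	microchip,hi-drive-strength-microamp = <16000>;
 *	microchip,lo-drive-strength-microamp = <8000>;
 */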
+
+static int ksz8463_configure_straps_spi(struct ksz_device *dev)
+{
+ struct pinctrl *pinctrl;
+ struct gpio_desc *rxd0;
+ struct gpio_desc *rxd1;
+
+ rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(rxd0))
+ return PTR_ERR(rxd0);
+
+ rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(rxd1))
+ return PTR_ERR(rxd1);
+
+ if (!rxd0 && !rxd1)
+ return 0;
+
+ if ((rxd0 && !rxd1) || (rxd1 && !rxd0))
+ return -EINVAL;
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "reset");
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ return 0;
+}
+
+static int ksz8463_release_straps_spi(struct ksz_device *dev)
+{
+ return pinctrl_select_default_state(dev->dev);
+}
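An editorial note on the two helpers above, inferred from the code rather than from a datasheet:

/* The KSZ8463 latches its bus-mode straps from the RXD pins around reset:
 * ksz8463_configure_straps_spi() selects the "reset" pinctrl state and
 * drives RXD0 low and RXD1 high (the GPIOD_OUT_LOW/HIGH defaults above)
 * for the duration of the reset pulse in ksz_switch_register(), and
 * ksz8463_release_straps_spi() then restores the default pinmux.
 */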
+
+int ksz_switch_register(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *info;
+ struct device_node *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
-
- if (dev->pdata)
- dev->chip_id = dev->pdata->chip_id;
+ int i;
dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
GPIOD_OUT_LOW);
@@ -382,10 +5396,22 @@ int ksz_switch_register(struct ksz_device *dev,
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_configure_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
+
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
+
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_release_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
}
mutex_init(&dev->dev_mutex);
@@ -393,19 +5419,60 @@ int ksz_switch_register(struct ksz_device *dev,
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- dev->dev_ops = ops;
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
- if (dev->dev_ops->detect(dev))
- return -EINVAL;
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
+ dev->dev_ops = dev->info->ops;
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->info->port_cnt * sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) * (dev->info->mib_cnt + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
+ }
+
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->info->port_cnt;
+
+ /* set the phylink ops */
+ dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
- for (port_num = 0; port_num < dev->port_cnt; ++port_num)
+ for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
@@ -414,20 +5481,38 @@ int ksz_switch_register(struct ksz_device *dev,
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
- if (ports)
- for_each_available_child_of_node(ports, port) {
+ if (ports) {
+ for_each_available_child_of_node_scoped(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
- of_node_put(port);
+ of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
+
+ ksz_parse_rgmii_delay(dev, port_num, port);
+ dev->ports[port_num].fiber =
+ of_property_read_bool(port,
+ "micrel,fiber-mode");
}
+ of_node_put(ports);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
+
+ dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
+ "wakeup-source");
+ dev->pme_active_high = of_property_read_bool(dev->dev->of_node,
+ "microchip,pme-active-high");
}
ret = dsa_register_switch(dev->ds);
@@ -437,12 +5522,12 @@ int ksz_switch_register(struct ksz_device *dev,
}
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
+ /* Read MIB counters every 5 seconds to avoid overflow and keep the
+ * stats fresh.
+ */
+ dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
@@ -463,6 +5548,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 1597c63988b4..c65188cd3c0a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -10,9 +10,31 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
+#include <linux/irq.h>
+#include <linux/platform_data/microchip-ksz.h>
+
+#include "ksz_ptp.h"
+
+#define KSZ_MAX_NUM_PORTS 8
+/* Datasheets number KSZ switch ports from 1; the driver indexes from 0. */
+#define KSZ_PORT_1 0
+#define KSZ_PORT_2 1
+#define KSZ_PORT_4 3
+
+struct ksz_device;
+struct ksz_port;
+struct phylink_mac_ops;
+
+enum ksz_regmap_width {
+ KSZ_REGMAP_8,
+ KSZ_REGMAP_16,
+ KSZ_REGMAP_32,
+ __KSZ_NUM_REGMAPS,
+};
struct vlan_table {
u32 table[3];
@@ -22,31 +44,123 @@ struct ksz_port_mib {
struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
+ struct spinlock stats64_lock;
+};
+
+struct ksz_mib_names {
+ int index;
+ char string[ETH_GSTRING_LEN];
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+ u8 port_nirqs;
+ u8 num_tx_queues;
+ u8 num_ipms; /* number of Internal Priority Maps */
+ bool tc_cbs_supported;
+
+ /**
+ * @phy_side_mdio_supported: Indicates if the chip supports an additional
+ * side MDIO channel for accessing integrated PHYs.
+ */
+ bool phy_side_mdio_supported;
+ const struct ksz_dev_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
+ const struct ksz_mib_names *mib_names;
+ int mib_cnt;
+ u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ const u8 *xmii_ctrl0;
+ const u8 *xmii_ctrl1;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
+ bool supports_mii[KSZ_MAX_NUM_PORTS];
+ bool supports_rmii[KSZ_MAX_NUM_PORTS];
+ bool supports_rgmii[KSZ_MAX_NUM_PORTS];
+ bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
+ u8 sgmii_port;
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
+};
+
+struct ksz_ptp_irq {
+ struct ksz_port *port;
+ u16 ts_reg;
+ bool ts_en;
+ char name[16];
+ int num;
+};
+
+struct ksz_switch_macaddr {
+ unsigned char addr[ETH_ALEN];
+ refcount_t refcount;
};
struct ksz_port {
- u16 member;
- u16 vid_member;
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
+ bool isolated;
int stp_state;
struct phy_device phydev;
- u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
+ u32 sgmii_adv_write:1;
struct ksz_port_mib mib;
phy_interface_t interface;
+ u32 rgmii_tx_val;
+ u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ void *acl_priv;
+ struct ksz_irq pirq;
+ u8 num;
+ struct phylink_pcs *pcs;
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+ struct kernel_hwtstamp_config tstamp_config;
+ bool hwts_tx_en;
+ bool hwts_rx_en;
+ struct ksz_irq ptpirq;
+ struct ksz_ptp_irq ptpmsg_irq[3];
+ ktime_t tstamp_msg;
+ struct completion tstamp_msg_comp;
+#endif
+ bool manual_flow;
};
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
- const char *name;
+ const struct ksz_chip_data *info;
struct mutex dev_mutex; /* device access */
struct mutex regmap_mutex; /* regmap access */
@@ -55,42 +169,164 @@ struct ksz_device {
const struct ksz_dev_ops *dev_ops;
struct device *dev;
- struct regmap *regmap[3];
+ struct regmap *regmap[__KSZ_NUM_REGMAPS];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
/* chip specific data */
u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
+ u8 chip_rev;
int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
int phy_port_cnt;
- int port_cnt;
- u8 reg_mib_cnt;
- int mib_cnt;
- const struct mib_names *mib_names;
phy_interface_t compat_interface;
- u32 regs_size;
- bool phy_errata_9477;
bool synclko_125;
+ bool synclko_disable;
+ bool wakeup_source;
+ bool pme_active_high;
struct vlan_table *vlan_cache;
struct ksz_port *ports;
struct delayed_work mib_read;
unsigned long mib_read_interval;
- u16 br_member;
- u16 member;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
+ struct ksz_ptp_data ptp_data;
+
+ struct ksz_switch_macaddr *switch_macaddr;
+ struct net_device *hsr_dev; /* HSR */
+ u8 hsr_ports;
+
+ /**
+ * @phy_addr_map: Array mapping switch ports to their corresponding PHY
+ * addresses.
+ */
+ u8 phy_addr_map[KSZ_MAX_NUM_PORTS];
+
+ /**
+ * @parent_mdio_bus: Pointer to the external MDIO bus controller.
+ *
+ * This points to an external MDIO bus controller that is used to access
+ * the PHYs integrated within the switch. Unlike an integrated MDIO
+ * bus, this external controller provides a direct path for managing
+ * the switch’s internal PHYs, bypassing the main SPI interface.
+ */
+ struct mii_bus *parent_mdio_bus;
+};
+
+/* List of supported models */
+enum ksz_model {
+ KSZ8463,
+ KSZ8563,
+ KSZ8567,
+ KSZ8795,
+ KSZ8794,
+ KSZ8765,
+ KSZ88X3,
+ KSZ8864,
+ KSZ8895,
+ KSZ9477,
+ KSZ9896,
+ KSZ9897,
+ KSZ9893,
+ KSZ9563,
+ KSZ9567,
+ LAN9370,
+ LAN9371,
+ LAN9372,
+ LAN9373,
+ LAN9374,
+ LAN9646,
+};
+
+enum ksz_regs {
+ REG_SW_MAC_ADDR,
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+ P_XMII_CTRL_0,
+ P_XMII_CTRL_1,
+ REG_SW_PME_CTRL,
+ REG_PORT_PME_STATUS,
+ REG_PORT_PME_CTRL,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+ ALU_STAT_DIRECT,
+ ALU_RESV_MCAST_ADDR,
+ P_MII_TX_FLOW_CTRL,
+ P_MII_RX_FLOW_CTRL,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
+enum ksz_xmii_ctrl0 {
+ P_MII_100MBIT,
+ P_MII_10MBIT,
+ P_MII_FULL_DUPLEX,
+ P_MII_HALF_DUPLEX,
+};
+
+enum ksz_xmii_ctrl1 {
+ P_RGMII_SEL,
+ P_RMII_SEL,
+ P_GMII_SEL,
+ P_MII_SEL,
+ P_GMII_1GBIT,
+ P_GMII_NOT_1GBIT,
};
struct alu_struct {
@@ -113,70 +349,157 @@ struct alu_struct {
};
struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+
+ /**
+ * @mdio_bus_preinit: Function pointer to pre-initialize the MDIO bus
+ * for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is used to configure the MDIO bus for PHY
+ * access before initiating regular PHY operations. It enables either
+ * SPI/I2C or side MDIO access modes by unlocking necessary registers
+ * and setting up access permissions for the selected mode.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure.
+ */
+ int (*mdio_bus_preinit)(struct ksz_device *dev, bool side_mdio);
+
+ /**
+ * @create_phy_addr_map: Function pointer to create a port-to-PHY
+ * address map.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side
+ * MDIO bus.
+ *
+ * This function pointer is responsible for mapping switch ports to PHY
+ * addresses according to the configured access mode (SPI or side MDIO)
+ * and the device’s strap configuration. The mapping setup may vary
+ * depending on the chip variant and configuration. Ensures the correct
+ * address mapping for PHY communication.
+ *
+ * Return:
+ * - 0 on success.
+ * - Negative error code on failure (e.g., invalid configuration).
+ */
+ int (*create_phy_addr_map)(struct ksz_device *dev, bool side_mdio);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*pme_pread8)(struct ksz_device *dev, int port, int offset,
+ u8 *data);
+ int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset,
+ u8 data);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
+
+ int (*pcs_create)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
+int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
-int ksz8_switch_register(struct ksz_device *dev);
-int ksz9477_switch_register(struct ksz_device *dev);
-
-void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
-
-/* Common DSA access functions */
-
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br);
-void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+bool ksz_get_gbit(struct ksz_device *dev, int port);
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
+extern const struct ksz_chip_data ksz_switch_chips[];
+int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
+ struct netlink_ext_ack *extack);
+void ksz_switch_macaddr_put(struct dsa_switch *ds);
+void ksz_switch_shutdown(struct ksz_device *dev);
+int ksz_handle_wake_reason(struct ksz_device *dev, int port);
/* Common register access functions */
+static inline struct regmap *ksz_regmap_8(struct ksz_device *dev)
+{
+ return dev->regmap[KSZ_REGMAP_8];
+}
+
+static inline struct regmap *ksz_regmap_16(struct ksz_device *dev)
+{
+ return dev->regmap[KSZ_REGMAP_16];
+}
+
+static inline struct regmap *ksz_regmap_32(struct ksz_device *dev)
+{
+ return dev->regmap[KSZ_REGMAP_32];
+}
+
+static inline bool ksz_is_ksz8463(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8463_CHIP_ID;
+}
static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
{
unsigned int value;
- int ret = regmap_read(dev->regmap[0], reg, &value);
+ int ret = regmap_read(ksz_regmap_8(dev), reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
*val = value;
return ret;
@@ -185,7 +508,11 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
{
unsigned int value;
- int ret = regmap_read(dev->regmap[1], reg, &value);
+ int ret = regmap_read(ksz_regmap_16(dev), reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
*val = value;
return ret;
@@ -194,7 +521,11 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
{
unsigned int value;
- int ret = regmap_read(dev->regmap[2], reg, &value);
+ int ret = regmap_read(ksz_regmap_32(dev), reg, &value);
+
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
*val = value;
return ret;
@@ -205,8 +536,11 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
u32 value[2];
int ret;
- ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ ret = regmap_bulk_read(ksz_regmap_32(dev), reg, value, 2);
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -214,17 +548,64 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(ksz_regmap_8(dev), reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(ksz_regmap_16(dev), reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(ksz_regmap_32(dev), reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask,
+ u16 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(ksz_regmap_16(dev), reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
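+
+/* Example: a read-modify-write touches only the masked field, e.g.
+ *
+ *   ksz_rmw16(dev, reg, mask, FIELD_PREP(mask, new_val));
+ *
+ * (reg, mask and new_val are placeholders) updates the field selected by
+ * mask and leaves all other bits of the register unchanged.
+ */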
+
+static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask,
+ u32 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(ksz_regmap_32(dev), reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -236,43 +617,71 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
val[0] = swab32(value & 0xffffffffULL);
val[1] = swab32(value >> 32ULL);
- return regmap_bulk_write(dev->regmap[2], reg, val, 2);
+ return regmap_bulk_write(ksz_regmap_32(dev), reg, val, 2);
+}
+
+static inline int ksz_rmw8(struct ksz_device *dev, int offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = regmap_update_bits(ksz_regmap_8(dev), offset, mask, val);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 8bit reg 0x%x: %pe\n", offset,
+ ERR_PTR(ret));
+
+ return ret;
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
+}
+
+static inline int ksz_prmw8(struct ksz_device *dev, int port, int offset,
+ u8 mask, u8 val)
+{
+ return ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
+}
+
+static inline int ksz_prmw32(struct ksz_device *dev, int port, int offset,
+ u32 mask, u32 val)
+{
+ return ksz_rmw32(dev, dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
}
static inline void ksz_regmap_lock(void *__mtx)
@@ -287,6 +696,222 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+static inline bool ksz_is_ksz87xx(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID;
+}
+
+static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ88X3_CHIP_ID;
+}
+
+static inline bool ksz_is_8895_family(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8895_CHIP_ID ||
+ dev->chip_id == KSZ8864_CHIP_ID;
+}
+
+static inline bool is_ksz8(struct ksz_device *dev)
+{
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
+ ksz_is_8895_family(dev) || ksz_is_ksz8463(dev);
+}
+
+static inline bool is_ksz88xx(struct ksz_device *dev)
+{
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev) ||
+ ksz_is_ksz8463(dev);
+}
+
+static inline bool is_ksz9477(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ9477_CHIP_ID;
+}
+
+static inline int is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
+static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
+{
+ return (dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
+}
+
+static inline int ksz_get_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port - 1;
+}
+
+static inline bool ksz_has_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port > 0;
+}
+
+static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
+{
+ return dev->info->sgmii_port == port + 1;
+}
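+
+/* Note: sgmii_port is 1-based in the chip data so that 0 can mean "no SGMII
+ * port"; e.g. sgmii_port == 3 means that DSA port index 2 is the SGMII port.
+ */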
+
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ84_FAMILY_ID 0x84
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+#define KSZ8895_FAMILY_ID 0x95
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+#define KSZ8895_CHIP_ID_95 0x4
+#define KSZ8895_CHIP_ID_95R 0x6
+
+/* KSZ8895 specific register */
+#define REG_KSZ8864_CHIP_ID 0xFE
+#define SW_KSZ8864 BIT(7)
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+#define SKU_ID_KSZ9563 0x1c
+
+/* The driver sets switch broadcast storm protection to a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 100% reference value: 148,800 frames/s (the 64-byte frame rate at
+ * 100 Mbit/s) over a 67 ms interval gives 148800 * 0.067 = 9969 frames.
+ * The protection rate in percent is applied to this value (divided by 100)
+ * when it is programmed.
+ */
+#define BROADCAST_STORM_VALUE 9969
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
+/* xMII configuration */
+#define P_MII_DUPLEX_M BIT(6)
+#define P_MII_100MBIT_M BIT(4)
+
+#define P_GMII_1GBIT_M BIT(6)
+#define P_RGMII_ID_IG_ENABLE BIT(4)
+#define P_RGMII_ID_EG_ENABLE BIT(3)
+#define P_MII_MAC_MODE BIT(2)
+#define P_MII_SEL_M 0x3
+
+/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define KSZ87XX_REG_INT_EN 0x7D
+#define KSZ87XX_INT_PME_MASK BIT(4)
+
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+#define PORT_SRC_PTP_INT 2
+
+#define KSZ8795_HUGE_PACKET_SIZE 2000
+#define KSZ8863_HUGE_PACKET_SIZE 1916
+#define KSZ8863_NORMAL_PACKET_SIZE 1536
+#define KSZ8_LEGAL_PACKET_SIZE 1518
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
+#define KSZ8873_REG_GLOBAL_CTRL_12 0x0e
+/* Drive Strength of I/O Pad
+ * 0: 8mA, 1: 16mA
+ */
+#define KSZ8873_DRIVE_STRENGTH_16MA BIT(6)
+
+#define KSZ8795_REG_SW_CTRL_20 0xa3
+#define KSZ9477_REG_SW_IO_STRENGTH 0x010d
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
+#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+
+/* TXQ Split Control Register for per-port, per-queue configuration.
+ * Register 0xAF is TXQ Split for Q3 on Port 1.
+ * Register offset formula: 0xAF + (port * 4) + (3 - queue)
+ * where: port = 0..2, queue = 0..3
+ */
+#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
+ (0xAF + ((port) * 4) + (3 - (queue)))
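+
+/* Worked example: Q1 on Port 3 (port = 2, queue = 1) maps to
+ * 0xAF + (2 * 4) + (3 - 1) = 0xB9.
+ */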
+
+/* Bit 7 selects between:
+ * 0 = Strict priority mode (highest-priority queue first)
+ * 1 = Weighted Fair Queuing (WFQ) mode:
+ * Queue weights: Q3:Q2:Q1:Q0 = 8:4:2:1
+ * If any queues are empty, weight is redistributed.
+ *
+ * Note: This is referred to as "Weighted Fair Queuing" (WFQ) in KSZ8863/8873
+ * documentation, and as "Weighted Round Robin" (WRR) in KSZ9477 family docs.
+ */
+#define KSZ8873_TXQ_WFQ_ENABLE BIT(7)
+
+#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
+#define KSZ9477_OUT_RATE_NO_LIMIT 0
+
+#define KSZ9477_PORT_MRI_TC_MAP__4 0x0808
+
+#define KSZ9477_PORT_TC_MAP_S 4
+
+/* CBS related registers */
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M GENMASK(7, 6)
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M GENMASK(5, 4)
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define KSZ9477_PORT_MTI_QUEUE_CTRL_1 0x0915
+#define KSZ9477_DEFAULT_WRR_WEIGHT 1
+
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
@@ -319,9 +944,34 @@ static inline void ksz_regmap_unlock(void *__mtx)
#define KSZ_REGMAP_TABLE(ksz, swp, regbits, regpad, regalign) \
static const struct regmap_config ksz##_regmap_config[] = { \
- KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \
- KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \
- KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_8] = KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_16] = KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_32] = KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
+ }
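+
+/* Usage sketch (illustrative, hypothetical parameter values): a bus glue
+ * driver instantiates the three regmap configs once, e.g.
+ *
+ *   KSZ_REGMAP_TABLE(ksz_example, 32, 0x24, 0x4, 0);
+ *
+ * which expands to ksz_example_regmap_config[], indexed by KSZ_REGMAP_8/16/32,
+ * one entry per access width, ready to be handed to a regmap initializer.
+ */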
+
+#define KSZ8463_REGMAP_ENTRY(width, regbits, regpad, regalign) \
+ { \
+ .name = #width, \
+ .val_bits = (width), \
+ .reg_stride = (width / 8), \
+ .reg_bits = (regbits) + (regalign), \
+ .pad_bits = (regpad), \
+ .read = ksz8463_spi_read, \
+ .write = ksz8463_spi_write, \
+ .max_register = BIT(regbits) - 1, \
+ .cache_type = REGCACHE_NONE, \
+ .zero_flag_mask = 1, \
+ .use_single_read = 1, \
+ .use_single_write = 1, \
+ .lock = ksz_regmap_lock, \
+ .unlock = ksz_regmap_unlock, \
+ }
+
+#define KSZ8463_REGMAP_TABLE(ksz, regbits, regpad, regalign) \
+ static const struct regmap_config ksz##_regmap_config[] = { \
+ [KSZ_REGMAP_8] = KSZ8463_REGMAP_ENTRY(8, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_16] = KSZ8463_REGMAP_ENTRY(16, (regbits), (regpad), (regalign)), \
+ [KSZ_REGMAP_32] = KSZ8463_REGMAP_ENTRY(32, (regbits), (regpad), (regalign)), \
}
#endif
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
new file mode 100644
index 000000000000..7131c5caac54
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include <linux/dsa/ksz_common.h>
+#include <net/dsa.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
+
+#include "ksz_common.h"
+#include "ksz_dcb.h"
+#include "ksz8.h"
+
+/* Port X Control 0 register.
+ * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30.
+ * However, the driver uses get_port_addr(), which maps Port 1 to offset 0.
+ * Therefore, we define the base offset as 0x00 here to align with that logic.
+ */
+#define KSZ8_REG_PORT_1_CTRL_0 0x00
+#define KSZ8463_REG_PORT_1_CTRL_0 0x6C
+#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
+#define KSZ8_PORT_802_1P_ENABLE BIT(5)
+#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+
+#define KSZ8463_REG_TOS_DSCP_CTRL 0x16
+#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
+#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
+
+#define KSZ9477_REG_SW_MAC_TOS_CTRL 0x033e
+#define KSZ9477_SW_TOS_DSCP_REMAP BIT(0)
+#define KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M GENMASK(5, 3)
+
+#define KSZ9477_REG_DIFFSERV_PRIO_MAP 0x0340
+
+#define KSZ9477_REG_PORT_MRI_PRIO_CTRL 0x0801
+#define KSZ9477_PORT_HIGHEST_PRIO BIT(7)
+#define KSZ9477_PORT_OR_PRIO BIT(6)
+#define KSZ9477_PORT_MAC_PRIO_ENABLE BIT(4)
+#define KSZ9477_PORT_VLAN_PRIO_ENABLE BIT(3)
+#define KSZ9477_PORT_802_1P_PRIO_ENABLE BIT(2)
+#define KSZ9477_PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define KSZ9477_PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define KSZ9477_REG_PORT_MRI_MAC_CTRL 0x0802
+#define KSZ9477_PORT_BASED_PRIO_M GENMASK(2, 0)
+
+struct ksz_apptrust_map {
+ u8 apptrust;
+ u8 bit;
+};
+
+static const struct ksz_apptrust_map ksz8_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ8_PORT_802_1P_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ8_PORT_DIFFSERV_ENABLE },
+};
+
+static const struct ksz_apptrust_map ksz9477_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ9477_PORT_802_1P_PRIO_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ9477_PORT_DIFFSERV_PRIO_ENABLE },
+};
+
+/* ksz_supported_apptrust[] - Supported apptrust selectors and Priority Order
+ * of Internal Priority Map (IPM) sources.
+ *
+ * This array defines the apptrust selectors supported by the hardware, where
+ * the index within the array indicates the priority of the selector - lower
+ * indices correspond to higher priority. This fixed priority scheme is due to
+ * the hardware's design, which does not support configurable priority among
+ * different priority sources.
+ *
+ * The priority sources, including Tail Tag, ACL, VLAN PCP and DSCP, are
+ * ordered by the hardware's fixed logic, as detailed below. The order reflects a
+ * non-configurable precedence where certain types of priority information
+ * override others:
+ *
+ * 1. Tail Tag - Highest priority, overrides ACL, VLAN PCP, and DSCP priorities.
+ * 2. ACL - Overrides VLAN PCP and DSCP priorities.
+ * 3. VLAN PCP - Overrides DSCP priority.
+ * 4. DSCP - Lowest priority, does not override any other priority source.
+ *
+ * In this context, the array's lower index (higher priority) for
+ * 'DCB_APP_SEL_PCP' suggests its relative priority over
+ * 'IEEE_8021QAZ_APP_SEL_DSCP' within the system's fixed priority scheme.
+ *
+ * DCB_APP_SEL_PCP - Priority Code Point selector
+ * IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ */
+static const u8 ksz_supported_apptrust[] = {
+ DCB_APP_SEL_PCP,
+ IEEE_8021QAZ_APP_SEL_DSCP,
+};
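+
+/* For example, sel = { DCB_APP_SEL_PCP, IEEE_8021QAZ_APP_SEL_DSCP } is a
+ * valid request, while the reversed order is rejected, since DSCP cannot take
+ * precedence over PCP on this hardware.
+ */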
+
+static const char * const ksz_supported_apptrust_variants[] = {
+ "empty", "dscp", "pcp", "dscp pcp"
+};
+
+static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
+ u8 *mask, int *shift)
+{
+ if (is_ksz8(dev)) {
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
+ } else {
+ *reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
+ *mask = KSZ9477_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ9477_PORT_BASED_PRIO_M);
+ }
+}
+
+/**
+ * ksz_get_dscp_prio_reg - Retrieves the DSCP-to-priority-mapping register
+ * @dev: Pointer to the KSZ switch device structure
+ * @reg: Pointer to the register address to be set
+ * @per_reg: Pointer to the number of DSCP values per register
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the DSCP to priority mapping register, the number of
+ * DSCP values per register, and the mask to be set.
+ */
+static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
+ int *per_reg, u8 *mask)
+{
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) {
+ *reg = KSZ8765_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else if (ksz_is_ksz88x3(dev) || ksz_is_ksz8463(dev)) {
+ *reg = KSZ88X3_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_TOS_DSCP_CTRL;
+ } else {
+ *reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
+ *per_reg = 2;
+ *mask = GENMASK(2, 0);
+ }
+}
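+
+/* Packing example: with per_reg = 4 and a 2-bit mask, DSCP 9 is stored in
+ * byte reg + (9 / 4) = reg + 2 at bit offset (9 % 4) * (8 / 4) = 2; with
+ * per_reg = 2 and a 3-bit mask, it is stored in reg + 4 at bit offset 4.
+ */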
+
+/**
+ * ksz_get_apptrust_map_and_reg - Retrieves the apptrust map and register
+ * @dev: Pointer to the KSZ switch device structure
+ * @map: Pointer to the apptrust map to be set
+ * @reg: Pointer to the register address to be set
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the apptrust map and register address for the
+ * apptrust configuration.
+ */
+static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
+ const struct ksz_apptrust_map **map,
+ int *reg, u8 *mask)
+{
+ if (is_ksz8(dev)) {
+ *map = ksz8_apptrust_map_to_bit;
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ if (ksz_is_ksz8463(dev))
+ *reg = KSZ8463_REG_PORT_1_CTRL_0;
+ } else {
+ *map = ksz9477_apptrust_map_to_bit;
+ *reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
+ *mask = KSZ9477_PORT_802_1P_PRIO_ENABLE |
+ KSZ9477_PORT_DIFFSERV_PRIO_ENABLE;
+ }
+}
+
+/**
+ * ksz_port_get_default_prio - Retrieves the default priority for a port on a
+ * KSZ switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number from which to get the default priority
+ *
+ * This function fetches the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: The default priority of the port on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret, reg, shift;
+ u8 data, mask;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ return (data & mask) >> shift;
+}
+
+/**
+ * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function sets the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, shift;
+ u8 mask;
+
+ if (prio >= dev->info->num_ipms)
+ return -EINVAL;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
+}
+
+/**
+ * ksz_port_get_dscp_prio - Retrieves the priority for a DSCP value on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the priority
+ * @dscp: DSCP value for which to get the priority
+ *
+ * This function fetches the priority value from the switch's global
+ * DSCP-to-priority mapping table for the specified DSCP value.
+ *
+ * Return: The priority value for the DSCP on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, per_reg, ret, shift;
+ u8 data, mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ if (!is_ksz8(dev)) {
+ ret = ksz_read8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, &data);
+ if (ret)
+ return ret;
+
+ /* If DSCP remapping is disabled, DSCP bits 3-5 are used as
+ * Internal Priority Map (IPM)
+ */
+ if (!(data & KSZ9477_SW_TOS_DSCP_REMAP))
+ return FIELD_GET(KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M,
+ dscp);
+ }
+
+ /* In case DSCP remapping is enabled, read the priority from the
+ * global DSCP-to-priority mapping table.
+ */
+ reg += dscp / per_reg;
+ ret = ksz_read8(dev, reg, &data);
+ if (ret)
+ return ret;
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return (data >> shift) & mask;
+}
+
+/**
+ * ksz_set_global_dscp_entry - Sets the global DSCP-to-priority mapping entry
+ * @dev: Pointer to the KSZ switch device structure
+ * @dscp: DSCP value for which to set the priority
+ * @ipm: Priority value to set
+ *
+ * This function sets the global DSCP-to-priority mapping entry for the
+ * specified DSCP value.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_set_global_dscp_entry(struct ksz_device *dev, u8 dscp, u8 ipm)
+{
+ int reg, per_reg, shift;
+ u8 mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return ksz_rmw8(dev, reg + (dscp / per_reg), mask << shift,
+ ipm << shift);
+}
+
+/**
+ * ksz_init_global_dscp_map - Initializes the global DSCP-to-priority mapping
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the global DSCP-to-priority mapping table for the
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_init_global_dscp_map(struct ksz_device *dev)
+{
+ int ret, dscp;
+
+ /* On KSZ9xxx variants, DSCP remapping is disabled by default.
+ * Enable it to get predictable and reproducible behavior across
+ * different devices.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_rmw8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL,
+ KSZ9477_SW_TOS_DSCP_REMAP,
+ KSZ9477_SW_TOS_DSCP_REMAP);
+ if (ret)
+ return ret;
+ }
+
+ for (dscp = 0; dscp < DSCP_MAX; dscp++) {
+ int ipm, tt;
+
+ /* Map DSCP to Traffic Type, which corresponds to the Internal
+ * Priority Map (IPM) in the switch.
+ */
+ if (!is_ksz8(dev)) {
+ ipm = ietf_dscp_to_ieee8021q_tt(dscp);
+ } else {
+ /* On KSZ8xxx variants we do not have IPM to queue
+ * remapping table. We need to convert DSCP to Traffic
+ * Type and then to queue.
+ */
+ tt = ietf_dscp_to_ieee8021q_tt(dscp);
+ if (tt < 0)
+ return tt;
+
+ ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);
+ }
+
+ if (ipm < 0)
+ return ipm;
+
+ ret = ksz_set_global_dscp_entry(dev, dscp, ipm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_port_add_dscp_prio - Adds a DSCP-to-priority mapping entry for a port on
+ * a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to add the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to add the priority
+ * @prio: Priority value to set
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (prio >= dev->info->num_ipms)
+ return -ERANGE;
+
+ return ksz_set_global_dscp_entry(dev, dscp, prio);
+}
+
+/**
+ * ksz_port_del_dscp_prio - Deletes a DSCP-to-priority mapping entry for a port
+ * on a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to delete the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to delete the priority
+ * @prio: Priority value to delete
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int ipm;
+
+ if (ksz_port_get_dscp_prio(ds, port, dscp) != prio)
+ return 0;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ return ksz_set_global_dscp_entry(dev, dscp, ipm);
+}
+
+/**
+ * ksz_apptrust_error - Prints an error message for an invalid apptrust selector
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function prints an error message when an invalid apptrust selector is
+ * provided.
+ */
+static void ksz_apptrust_error(struct ksz_device *dev)
+{
+ char supported_apptrust_variants[64];
+ int i;
+
+ supported_apptrust_variants[0] = '\0';
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust_variants); i++) {
+ if (i > 0)
+ strlcat(supported_apptrust_variants, ", ",
+ sizeof(supported_apptrust_variants));
+ strlcat(supported_apptrust_variants,
+ ksz_supported_apptrust_variants[i],
+ sizeof(supported_apptrust_variants));
+ }
+
+ dev_err(dev->dev, "Invalid apptrust selector or priority order. Supported: %s\n",
+ supported_apptrust_variants);
+}
+
+/**
+ * ksz_port_set_apptrust_validate - Validates the apptrust selectors
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to validate
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function validates the apptrust selectors provided and ensures that
+ * they are in the correct order.
+ *
+ * This family of switches supports two apptrust selectors: DCB_APP_SEL_PCP and
+ * IEEE_8021QAZ_APP_SEL_DSCP. The priority order of the selectors is fixed and
+ * cannot be changed. The order is as follows:
+ * 1. DCB_APP_SEL_PCP - Priority Code Point selector (highest priority)
+ * 2. IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ * (lowest priority)
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port,
+ const u8 *sel, int nsel)
+{
+ int i, j, found;
+ int j_prev = 0;
+
+ /* Iterate through the requested selectors */
+ for (i = 0; i < nsel; i++) {
+ found = 0;
+
+ /* Check if the current selector is supported by the hardware */
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ found = 1;
+
+ /* Ensure that no higher priority selector (lower index)
+ * precedes a lower priority one
+ */
+ if (i > 0 && j <= j_prev)
+ goto err_sel_not_valid;
+
+ j_prev = j;
+ break;
+ }
+
+ if (!found)
+ goto err_sel_not_valid;
+ }
+
+ return 0;
+
+err_sel_not_valid:
+ ksz_apptrust_error(dev);
+
+ return -EINVAL;
+}
+
+/**
+ * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to set
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function sets the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const u8 *sel, int nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data = 0;
+ u8 mask;
+
+ ret = ksz_port_set_apptrust_validate(dev, port, sel, nsel);
+ if (ret)
+ return ret;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ for (i = 0; i < nsel; i++) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ data |= map[j].bit;
+ break;
+ }
+ }
+
+ return ksz_prmw8(dev, port, reg, mask, data);
+}
+
+/**
+ * ksz_port_get_apptrust - Retrieves the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the apptrust selectors
+ * @sel: Array to store the apptrust selectors
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function fetches the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data;
+ u8 mask;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ *nsel = 0;
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust); i++) {
+ if (data & map[i].bit)
+ sel[(*nsel)++] = ksz_supported_apptrust[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_dcb_init_port - Initializes the DCB configuration for a port on a KSZ
+ * switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to initialize the DCB configuration
+ *
+ * This function initializes the DCB configuration for the specified port on a
+ * KSZ switch, setting the port's default priority and apptrust selectors.
+ * The default priority is set to Best Effort, and the apptrust selector is
+ * set to PCP only (see ksz_default_apptrust in the function body).
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init_port(struct ksz_device *dev, int port)
+{
+ const u8 ksz_default_apptrust[] = { DCB_APP_SEL_PCP };
+ int ret, ipm;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ /* Set the default priority for the port to Best Effort */
+ ret = ksz_port_set_default_prio(dev->ds, port, ipm);
+ if (ret)
+ return ret;
+
+ return ksz_port_set_apptrust(dev->ds, port, ksz_default_apptrust,
+ ARRAY_SIZE(ksz_default_apptrust));
+}
+
+/**
+ * ksz_dcb_init - Initializes the DCB configuration for a KSZ switch
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the DCB configuration for a KSZ switch. The global
+ * DSCP-to-priority mapping table is initialized.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init(struct ksz_device *dev)
+{
+ return ksz_init_global_dscp_map(dev);
+}
diff --git a/drivers/net/dsa/microchip/ksz_dcb.h b/drivers/net/dsa/microchip/ksz_dcb.h
new file mode 100644
index 000000000000..e2065223ba90
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef __KSZ_DCB_H
+#define __KSZ_DCB_H
+
+#include <net/dsa.h>
+
+#include "ksz_common.h"
+
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port);
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio);
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp);
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const unsigned char *sel,
+ int nsel);
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel);
+int ksz_dcb_init_port(struct ksz_device *dev, int port);
+int ksz_dcb_init(struct ksz_device *dev);
+
+#endif /* __KSZ_DCB_H */
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
new file mode 100644
index 000000000000..997e4a76d0a6
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -0,0 +1,1188 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#include <linux/dsa/ksz_common.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "ksz_common.h"
+#include "ksz_ptp.h"
+#include "ksz_ptp_reg.h"
+
+#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
+#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct ksz_deferred_xmit_work, work)
+
+/* Maximum drift correction in ppb: the largest sub-nanosecond rate
+ * adjustment divided by the sub-nanoseconds per nanosecond and by the 40 ns
+ * clock period, scaled to one second:
+ * (2^30 - 1) / 2^32 / 40 ns * 10^9 ns = 6249999
+ */
+#define KSZ_MAX_DRIFT_CORR 6249999
+#define KSZ_MAX_PULSE_WIDTH 125000000LL
+
+#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
+#define KSZ_PTP_SUBNS_BITS 32
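+
+/* Example: a +1 ppm adjustment (scaled_ppm = 1 << 16) corresponds to a rate
+ * delta of base * 1e-6 = (40 << 32) * 1e-6 ~= 171799 sub-nanosecond units
+ * per 40 ns clock tick (see ksz_ptp_adjfine()).
+ */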
+
+#define KSZ_PTP_INT_START 13
+
+static int ksz_ptp_tou_gpio(struct ksz_device *dev)
+{
+ int ret;
+
+ if (!is_lan937x(dev))
+ return 0;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
+ GPIO_OUT);
+ if (ret)
+ return ret;
+
+ ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
+ LED_OVR_1 | LED_OVR_2);
+ if (ret)
+ return ret;
+
+ return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
+}
+
+static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_INT_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ /* Clear reset and set GPIO direction */
+ return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
+ 0);
+}
+
+static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
+{
+ u32 data;
+
+ if (pulse_ns & 0x3)
+ return -EINVAL;
+
+ data = (pulse_ns / 8);
+ if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
+ return -ERANGE;
+
+ return 0;
+}
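+
+/* Example: a 1 ms pulse gives 1000000 / 8 = 125000 pulse-width units. The
+ * width must be a multiple of 4 ns and must fit TRIG_PULSE_WIDTH_M; the 50%
+ * duty-cycle path additionally clamps it to KSZ_MAX_PULSE_WIDTH (125 ms).
+ */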
+
+static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
+ struct timespec64 const *ts)
+{
+ int ret;
+
+ /* The hardware stores only 32 bits of seconds */
+ if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
+ return -EINVAL;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Check the error flag: the ACTIVE flag is NOT cleared on an error,
+ * so the error bits must be checked explicitly.
+ */
+ ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
+ dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
+ unit);
+ ret = -EIO;
+ /* Unit will be reset on next access */
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ptp_configure_perout(struct ksz_device *dev,
+ u32 cycle_width_ns, u32 pulse_width_ns,
+ struct timespec64 const *target_time,
+ u8 index)
+{
+ u32 data;
+ int ret;
+
+ data = FIELD_PREP(TRIG_NOTIFY, 1) |
+ FIELD_PREP(TRIG_GPO_M, index) |
+ FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
+ ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
+ if (ret)
+ return ret;
+
+ /* Set cycle count 0 - Infinite */
+ ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
+ if (ret)
+ return ret;
+
+ data = (pulse_width_ns / 8);
+ ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_target_time_set(dev, target_time);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_perout(struct ksz_device *dev,
+ struct ptp_perout_request const *request,
+ int on)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ u64 req_pulse_width_ns;
+ u64 cycle_width_ns;
+ u64 pulse_width_ns;
+ int pin = 0;
+ u32 data32;
+ int ret;
+
+ if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
+ ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
+ return -EBUSY;
+
+ pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
+ if (pin < 0)
+ return -EINVAL;
+
+ data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
+ FIELD_PREP(PTP_TOU_INDEX, request->index);
+ ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
+ PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_reset(dev, request->index);
+ if (ret)
+ return ret;
+
+ if (!on) {
+ ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
+ return 0;
+ }
+
+ ptp_data->perout_target_time_first.tv_sec = request->start.sec;
+ ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
+
+ ptp_data->perout_period.tv_sec = request->period.sec;
+ ptp_data->perout_period.tv_nsec = request->period.nsec;
+
+ cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
+ if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
+ return -EINVAL;
+
+ if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_width_ns = request->on.sec * NSEC_PER_SEC +
+ request->on.nsec;
+ } else {
+ /* Use a duty cycle of 50%. Maximum pulse width supported by the
+ * hardware is a little bit more than 125 ms.
+ */
+ req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
+ request->period.nsec) / 2;
+ pulse_width_ns = min_t(u64, req_pulse_width_ns,
+ KSZ_MAX_PULSE_WIDTH);
+ }
+
+ ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
+ &ptp_data->perout_target_time_first,
+ pin);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_gpio(dev);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_start(dev, request->index);
+ if (ret)
+ return ret;
+
+ ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_mode(struct ksz_device *dev)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ struct ksz_port *prt;
+ struct dsa_port *dp;
+ bool tag_en = false;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ prt = &dev->ports[dp->index];
+ if (prt->hwts_tx_en || prt->hwts_rx_en) {
+ tag_en = true;
+ break;
+ }
+ }
+
+ if (tag_en)
+ ptp_schedule_worker(ptp_data->clock, 0);
+ else
+ ptp_cancel_worker_sync(ptp_data->clock);
+
+ tagger_data->hwtstamp_set_state(dev->ds, tag_en);
+
+ return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
+ tag_en ? PTP_ENABLE : 0);
+}
+
+/* This function reports the timestamping capabilities of the switch when they
+ * are requested through the ethtool -T <interface> utility.
+ */
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
+
+ if (is_lan937x(dev))
+ ts->tx_types |= BIT(HWTSTAMP_TX_ON);
+
+ ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ ts->phc_index = ptp_clock_index(ptp_data->clock);
+
+ return 0;
+}
+
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[port];
+ *config = prt->tstamp_config;
+
+ return 0;
+}
+
+static int ksz_set_hwtstamp_config(struct ksz_device *dev,
+ struct ksz_port *prt,
+ struct kernel_hwtstamp_config *config)
+{
+ int ret;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
+ if (ret)
+ return ret;
+
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!is_lan937x(dev))
+ return -ERANGE;
+
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ prt->hwts_rx_en = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return ksz_ptp_enable_mode(dev);
+}
+
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+ int ret;
+
+ prt = &dev->ports[port];
+
+ ret = ksz_set_hwtstamp_config(dev, prt, config);
+ if (ret)
+ return ret;
+
+ prt->tstamp_config = *config;
+
+ return 0;
+}
+
+static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
+{
+ struct timespec64 ptp_clock_time;
+ struct ksz_ptp_data *ptp_data;
+ struct timespec64 diff;
+ struct timespec64 ts;
+
+ ptp_data = &dev->ptp_data;
+ ts = ktime_to_timespec64(tstamp);
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_clock_time = ptp_data->clock_time;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+ /* calculate full time from partial time stamp */
+ ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
+
+ /* find nearest possible point in time */
+ diff = timespec64_sub(ts, ptp_clock_time);
+ if (diff.tv_sec > 2)
+ ts.tv_sec -= 4;
+ else if (diff.tv_sec < -2)
+ ts.tv_sec += 4;
+
+ return timespec64_to_ktime(ts);
+}
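+
+/* Reconstruction example: if the PHC reads 1000 s and the partial stamp
+ * carries tv_sec = 3, then (1000 & ~3) | 3 = 1003; the difference of +3 s
+ * exceeds 2, so 4 seconds are subtracted, yielding 999 s, the nearest time
+ * whose two low seconds bits equal 3.
+ */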
+
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *ptp_hdr;
+ struct ksz_port *prt;
+ u8 ptp_msg_type;
+ ktime_t tstamp;
+ s64 correction;
+
+ prt = &dev->ports[port];
+
+ tstamp = KSZ_SKB_CB(skb)->tstamp;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
+
+ if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
+ goto out;
+
+ ptp_hdr = ptp_parse_header(skb, type);
+ if (!ptp_hdr)
+ goto out;
+
+ ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
+ if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
+ goto out;
+
+ /* Only subtract the partial time stamp from the correction field. When
+ * the hardware adds the egress time stamp to the correction field of
+ * the PDelay_Resp message on tx, also only the partial time stamp will
+ * be added. The PTP correction field is in units of 2^-16 ns, hence
+ * the shift by 16.
+ */
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+ correction -= ktime_to_ns(tstamp) << 16;
+
+ ptp_header_update_correction(skb, type, ptp_hdr, correction);
+
+out:
+ return false;
+}
+
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ struct ksz_port *prt;
+ unsigned int type;
+ u8 ptp_msg_type;
+
+ prt = &dev->ports[port];
+
+ if (!prt->hwts_tx_en)
+ return;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return;
+
+ ptp_msg_type = ptp_get_msgtype(hdr, type);
+
+ switch (ptp_msg_type) {
+ case PTP_MSGTYPE_SYNC:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
+ return;
+ break;
+ case PTP_MSGTYPE_PDELAY_REQ:
+ break;
+ case PTP_MSGTYPE_PDELAY_RESP:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
+ KSZ_SKB_CB(skb)->ptp_type = type;
+ KSZ_SKB_CB(skb)->update_correction = true;
+ return;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ /* caching the value to be used in tag_ksz.c */
+ KSZ_SKB_CB(skb)->clone = clone;
+}
+
+static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
+ struct ksz_port *prt, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps hwtstamps = {};
+ int ret;
+
+ /* The timeout must cover the time for the DSA conduit to transmit the
+ * data, the time stamp latency, the IRQ latency and the time needed to
+ * read the time stamp.
+ */
+ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
+ msecs_to_jiffies(100));
+ if (!ret)
+ return;
+
+ hwtstamps.hwtstamp = prt->tstamp_msg;
+ skb_complete_tx_timestamp(skb, &hwtstamps);
+}
+
+void ksz_port_deferred_xmit(struct kthread_work *work)
+{
+ struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[xmit_work->dp->index];
+
+ clone = KSZ_SKB_CB(skb)->clone;
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ reinit_completion(&prt->tstamp_msg_comp);
+
+ dsa_enqueue_skb(skb, skb->dev);
+
+ ksz_ptp_txtstamp_skb(dev, prt, clone);
+
+ kfree(xmit_work);
+}
+
+static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
+{
+ u32 nanoseconds;
+ u32 seconds;
+ u8 phase;
+ int ret;
+
+ /* Copy current PTP clock into shadow registers and read */
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
+ if (ret)
+ return ret;
+
+ ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
+ if (ret)
+ return ret;
+
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nanoseconds + phase * 8;
+
+ return 0;
+}
+
+static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, ts);
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_restart_perout(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ s64 now_ns, first_ns, period_ns, next_ns;
+ struct ptp_perout_request request;
+ struct timespec64 next;
+ struct timespec64 now;
+ unsigned int count;
+ int ret;
+
+ dev_info(dev->dev, "Restarting periodic output signal\n");
+
+ ret = _ksz_ptp_gettime(dev, &now);
+ if (ret)
+ return ret;
+
+ now_ns = timespec64_to_ns(&now);
+ first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
+
+ /* Calculate next perout event based on start time and period */
+ period_ns = timespec64_to_ns(&ptp_data->perout_period);
+
+ if (first_ns < now_ns) {
+ count = div_u64(now_ns - first_ns, period_ns);
+ next_ns = first_ns + count * period_ns;
+ } else {
+ next_ns = first_ns;
+ }
+
+ /* Ensure a 100 ms guard time prior to the next event */
+ while (next_ns < now_ns + 100000000)
+ next_ns += period_ns;
+
+ /* Restart periodic output signal */
+ next = ns_to_timespec64(next_ns);
+ request.start.sec = next.tv_sec;
+ request.start.nsec = next.tv_nsec;
+ request.period.sec = ptp_data->perout_period.tv_sec;
+ request.period.nsec = ptp_data->perout_period.tv_nsec;
+ request.index = 0;
+ request.flags = 0;
+
+ return ksz_ptp_enable_perout(dev, &request, 1);
+}
+
+static int ksz_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Write to shadow registers and Load PTP clock */
+ ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = *ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ u64 base, adj;
+ bool negative;
+ u32 data32;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ if (scaled_ppm) {
+ base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
+ negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
+
+ data32 = (u32)adj;
+ data32 &= PTP_SUBNANOSEC_M;
+ if (!negative)
+ data32 |= PTP_RATE_DIR;
+
+ ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
+ PTP_CLK_ADJ_ENABLE);
+ if (ret)
+ goto unlock;
+ } else {
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 delta64 = ns_to_timespec64(delta);
+ s32 sec, nsec;
+ u16 data16;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Do not use ns_to_timespec64() here: the hardware subtracts (or
+ * adds) sec and nsec as a pair, so both must carry the sign of delta,
+ * which div_s64_rem() preserves and ns_to_timespec64() would not.
+ */
+ sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
+ if (ret)
+ goto unlock;
+
+ data16 |= PTP_STEP_ADJ;
+
+ /* PTP_STEP_DIR -- 0: subtract, 1: add */
+ if (delta < 0)
+ data16 &= ~PTP_STEP_DIR;
+ else
+ data16 |= PTP_STEP_DIR;
+
+ ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ switch (req->type) {
+ case PTP_CLK_REQ_PEROUT:
+ mutex_lock(&ptp_data->lock);
+ ret = ksz_ptp_enable_perout(dev, &req->perout, on);
+ mutex_unlock(&ptp_data->lock);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int ret = 0;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/* Assigned to the do_aux_work hook in the ptp_clock capabilities; keeps
+ * the cached clock_time in sync with the hardware clock.
+ */
+static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 ts;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, &ts);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return HZ; /* reschedule in 1 second */
+}
+
+static int ksz_ptp_start_clock(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ int ret;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
+ if (ret)
+ return ret;
+
+ ptp_data->clock_time.tv_sec = 0;
+ ptp_data->clock_time.tv_nsec = 0;
+
+ return 0;
+}
+
+int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+ int ret;
+ u8 i;
+
+ ptp_data = &dev->ptp_data;
+ mutex_init(&ptp_data->lock);
+ spin_lock_init(&ptp_data->clock_lock);
+
+ ptp_data->caps.owner = THIS_MODULE;
+ snprintf(ptp_data->caps.name, sizeof(ptp_data->caps.name), "Microchip Clock");
+ ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
+ ptp_data->caps.gettime64 = ksz_ptp_gettime;
+ ptp_data->caps.settime64 = ksz_ptp_settime;
+ ptp_data->caps.adjfine = ksz_ptp_adjfine;
+ ptp_data->caps.adjtime = ksz_ptp_adjtime;
+ ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
+ ptp_data->caps.enable = ksz_ptp_enable;
+ ptp_data->caps.verify = ksz_ptp_verify_pin;
+ ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
+ ptp_data->caps.n_per_out = 3;
+
+ ret = ksz_ptp_start_clock(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
+ ptp_data->caps.pin_config = ptp_data->pin_config;
+
+ /* Currently only P2P mode is supported. When the 802_1AS bit is set,
+ * the switch forwards all PTP packets to the host port and none to the
+ * other ports.
+ */
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
+ PTP_TC_P2P | PTP_802_1AS);
+ if (ret)
+ return ret;
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ return 0;
+}
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (ptp_data->clock)
+ ptp_clock_unregister(ptp_data->clock);
+}
+
+static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_ptp_irq *ptpmsg_irq = dev_id;
+ struct ksz_device *dev;
+ struct ksz_port *port;
+ u32 tstamp_raw;
+ ktime_t tstamp;
+ int ret;
+
+ port = ptpmsg_irq->port;
+ dev = port->ksz_dev;
+
+ if (ptpmsg_irq->ts_en) {
+ ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
+ if (ret)
+ return IRQ_NONE;
+
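+ /* The raw 32-bit timestamp packs a 2-bit seconds field and a
+ * 30-bit nanoseconds field; ksz_tstamp_reconstruct() extends it
+ * to a full time value using the tracked software clock.
+ */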
+ tstamp = ksz_decode_tstamp(tstamp_raw);
+
+ port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
+
+ complete(&port->tstamp_msg_comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *ptpirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u16 data;
+ int ret;
+ u8 n;
+
+ dev = ptpirq->dev;
+
+ ret = ksz_read16(dev, ptpirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ /* Clear the interrupt status bits (write-1-to-clear) */
+ ret = ksz_write16(dev, ptpirq->reg_status, data);
+ if (ret)
+ return IRQ_NONE;
+
+ for (n = 0; n < ptpirq->nirqs; ++n) {
+ if (data & BIT(n + KSZ_PTP_INT_START)) {
+ sub_irq = irq_find_mapping(ptpirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
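+/* Register access may sleep (e.g. over SPI), so mask/unmask only update
+ * the cached mask word; ksz_ptp_irq_bus_sync_unlock() below writes it
+ * back to the hardware once the irqchip transaction ends.
+ */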
+static void ksz_ptp_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_ptp_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_ptp_irq_mask,
+ .irq_unmask = ksz_ptp_irq_unmask,
+ .irq_bus_lock = ksz_ptp_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
+};
+
+static int ksz_ptp_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
+ .map = ksz_ptp_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
+{
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ free_irq(ptpmsg_irq->num, ptpmsg_irq);
+ irq_dispose_mapping(ptpmsg_irq->num);
+}
+
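+/* hwirq n identifies the message type: 0 = pdelay_resp, 1 = xdelay_req,
+ * 2 = sync, matching the ts_reg[] and name[] tables below and the
+ * KSZ_PDRES_MSG/KSZ_XDREQ_MSG/KSZ_SYNC_MSG definitions.
+ */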
+static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
+{
+ static const u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS,
+ REG_PTP_PORT_XDELAY_TS, REG_PTP_PORT_SYNC_TS};
+ static const char * const name[] = {"pdresp-msg", "xdreq-msg",
+ "sync-msg"};
+ const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+ ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
+ if (!ptpmsg_irq->num)
+ return -EINVAL;
+
+ ptpmsg_irq->port = port;
+ ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
+
+ strscpy(ptpmsg_irq->name, name[n]);
+
+ return request_threaded_irq(ptpmsg_irq->num, NULL,
+ ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+ ptpmsg_irq->name, ptpmsg_irq);
+}
+
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ int irq;
+ int ret;
+
+ ptpirq->dev = dev;
+ ptpirq->masked = 0;
+ ptpirq->nirqs = 3;
+ ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
+ ptpirq->reg_status = ops->get_port_addr(p,
+ REG_PTP_PORT_TX_INT_STATUS__2);
+ snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
+
+ init_completion(&port->tstamp_msg_comp);
+
+ ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
+ if (!ptpirq->domain)
+ return -ENOMEM;
+
+ ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
+ if (!ptpirq->irq_num) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
+ IRQF_ONESHOT, ptpirq->name, ptpirq);
+ if (ret)
+ goto out;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++) {
+ ret = ksz_ptp_msg_irq_setup(port, irq);
+ if (ret)
+ goto out_ptp_msg;
+ }
+
+ return 0;
+
+out_ptp_msg:
+ free_irq(ptpirq->irq_num, ptpirq);
+ while (irq--) {
+ free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
+ irq_dispose_mapping(port->ptpmsg_irq[irq].num);
+ }
+out:
+ irq_domain_remove(ptpirq->domain);
+
+ return ret;
+}
+
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ u8 n;
+
+ for (n = 0; n < ptpirq->nirqs; n++)
+ ksz_ptp_msg_irq_free(port, n);
+
+ free_irq(ptpirq->irq_num, ptpirq);
+ irq_dispose_mapping(ptpirq->irq_num);
+
+ irq_domain_remove(ptpirq->domain);
+}
+
+MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("PTP support for KSZ switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
new file mode 100644
index 000000000000..3086e519b1b6
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_DRIVERS_KSZ_PTP_H
+#define _NET_DSA_DRIVERS_KSZ_PTP_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+
+#include <linux/ptp_clock_kernel.h>
+
+#define KSZ_PTP_N_GPIO 2
+
+enum ksz_ptp_tou_mode {
+ KSZ_PTP_TOU_IDLE,
+ KSZ_PTP_TOU_PEROUT,
+};
+
+struct ksz_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct ptp_pin_desc pin_config[KSZ_PTP_N_GPIO];
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ /* lock for accessing the clock_time */
+ spinlock_t clock_lock;
+ struct timespec64 clock_time;
+ enum ksz_ptp_tou_mode tou_mode;
+ struct timespec64 perout_target_time_first; /* start of first pulse */
+ struct timespec64 perout_period;
+};
+
+int ksz_ptp_clock_register(struct dsa_switch *ds);
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds);
+
+int ksz_get_ts_info(struct dsa_switch *ds, int port,
+ struct kernel_ethtool_ts_info *ts);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void ksz_port_deferred_xmit(struct kthread_work *work);
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type);
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p);
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p);
+
+#else
+
+struct ksz_ptp_data {
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
+
+static inline int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) {}
+
+#define ksz_get_ts_info NULL
+
+#define ksz_hwtstamp_get NULL
+
+#define ksz_hwtstamp_set NULL
+
+#define ksz_port_rxtstamp NULL
+
+#define ksz_port_txtstamp NULL
+
+#define ksz_port_deferred_xmit NULL
+
+#endif /* End of CONFIG_NET_DSA_MICROCHIP_KSZ_PTP */
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h
new file mode 100644
index 000000000000..d71e85510cda
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP register definitions
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_PTP_REGS_H
+#define __KSZ_PTP_REGS_H
+
+#define REG_SW_GLOBAL_LED_OVR__4 0x0120
+#define LED_OVR_2 BIT(1)
+#define LED_OVR_1 BIT(0)
+
+#define REG_SW_GLOBAL_LED_SRC__4 0x0128
+#define LED_SRC_PTP_GPIO_1 BIT(3)
+#define LED_SRC_PTP_GPIO_2 BIT(2)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+#define PTP_RTC_0NS 0x00
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+
+#define REG_PTP_RTC_SEC 0x0508
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+
+#define PTP_SUBNANOSEC_M 0x3FFFFFFF
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_GPIO_INDEX GENMASK(19, 16)
+#define PTP_TSI_INDEX BIT(8)
+#define PTP_TOU_INDEX GENMASK(1, 0)
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_M GENMASK(18, 16)
+#define TRIG_DONE_M GENMASK(2, 0)
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_M GENMASK(18, 16)
+#define TS_INT_M GENMASK(1, 0)
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M GENMASK(29, 26)
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_M GENMASK(22, 20)
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_M GENMASK(19, 16)
+#define TRIG_CASCADE_ITERATE_CNT_M GENMASK(15, 0)
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+#define TRIG_CYCLE_WIDTH_M GENMASK(31, 0)
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M GENMASK(31, 16)
+#define TRIG_BIT_PATTERN_M GENMASK(15, 0)
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M GENMASK(23, 0)
+
+/* Port PTP Register */
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+#define KSZ_SYNC_MSG 2
+#define KSZ_XDREQ_MSG 1
+#define KSZ_PDRES_MSG 0
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
new file mode 100644
index 000000000000..d8001734b057
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip ksz series register access through SPI
+ *
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <linux/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define KSZ8463_SPI_ADDR_SHIFT 13
+#define KSZ8463_SPI_ADDR_ALIGN 3
+#define KSZ8463_SPI_TURNAROUND_SHIFT 2
+
+#define KSZ8795_SPI_ADDR_SHIFT 12
+#define KSZ8795_SPI_ADDR_ALIGN 3
+#define KSZ8795_SPI_TURNAROUND_SHIFT 1
+
+#define KSZ8863_SPI_ADDR_SHIFT 8
+#define KSZ8863_SPI_ADDR_ALIGN 8
+#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
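+/* Each KSZ_REGMAP_TABLE() invocation expands (see ksz_common.h) to
+ * 8/16/32-bit regmap configs that share the chip's SPI command layout:
+ * register address shift, turnaround bits and address alignment.
+ */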
+KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
+ KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
+ KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+
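+/* The KSZ8463 SPI command word carries the 32-bit-aligned register
+ * address in its upper bits and a 4-bit byte-enable mask selecting
+ * which bytes of that dword the access touches.
+ */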
+static u16 ksz8463_reg(u16 reg, size_t size)
+{
+ switch (size) {
+ case 1:
+ reg = ((reg >> 2) << 4) | (1 << (reg & 3));
+ break;
+ case 2:
+ reg = ((reg >> 2) << 4) | (reg & 2 ? 0x0c : 0x03);
+ break;
+ default:
+ reg = ((reg >> 2) << 4) | 0xf;
+ break;
+ }
+ reg <<= KSZ8463_SPI_TURNAROUND_SHIFT;
+ return reg;
+}
+
+static int ksz8463_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 bytes[2];
+ u16 cmd;
+ int rc;
+
+ if (reg_size > 2 || val_size > 4)
+ return -EINVAL;
+ memcpy(&cmd, reg, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+ rc = spi_write_then_read(spi, bytes, reg_size, val, val_size);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so convert when
+ * running on a big-endian system.
+ */
+ if (!rc && val_size > 1) {
+ if (val_size == 2) {
+ u16 v = get_unaligned_le16(val);
+
+ memcpy(val, &v, sizeof(v));
+ } else if (val_size == 4) {
+ u32 v = get_unaligned_le32(val);
+
+ memcpy(val, &v, sizeof(v));
+ }
+ }
+#endif
+ return rc;
+}
+
+static int ksz8463_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ size_t val_size = count - 2;
+ u8 bytes[6];
+ u16 cmd;
+
+ if (count <= 2 || count > 6)
+ return -EINVAL;
+ memcpy(bytes, data, count);
+ memcpy(&cmd, data, sizeof(u16));
+ cmd = ksz8463_reg(cmd, val_size);
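+ /* The bit above the address field marks the command as a write */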
+ cmd |= (1 << (KSZ8463_SPI_ADDR_SHIFT + KSZ8463_SPI_TURNAROUND_SHIFT));
+ /* SPI command uses big-endian format. */
+ put_unaligned_be16(cmd, bytes);
+#if defined(__BIG_ENDIAN)
+ /* Register values use little-endian format, so convert when
+ * running on a big-endian system.
+ */
+ if (val_size == 2) {
+ u8 *val = &bytes[2];
+ u16 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le16(v, val);
+ } else if (val_size == 4) {
+ u8 *val = &bytes[2];
+ u32 v;
+
+ memcpy(&v, val, sizeof(v));
+ put_unaligned_le32(v, val);
+ }
+#endif
+ return spi_write(spi, bytes, count);
+}
+
+KSZ8463_REGMAP_TABLE(ksz8463, KSZ8463_SPI_ADDR_SHIFT, 0,
+ KSZ8463_SPI_ADDR_ALIGN);
+
+static int ksz_spi_probe(struct spi_device *spi)
+{
+ const struct regmap_config *regmap_config;
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &spi->dev;
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int i, ret = 0;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ /* Save chip id to do special initialization when probing. */
+ dev->chip_id = chip->chip_id;
+ if (chip->chip_id == KSZ88X3_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8463_CHIP_ID)
+ regmap_config = ksz8463_regmap_config;
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
+ regmap_config = ksz8795_regmap_config;
+ else if (chip->chip_id == KSZ8895_CHIP_ID ||
+ chip->chip_id == KSZ8864_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
+
+ for (i = 0; i < __KSZ_NUM_REGMAPS; i++) {
+ rc = regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
+ if (IS_ERR(dev->regmap[i])) {
+ return dev_err_probe(&spi->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ regmap_config[i].val_bits);
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ dev->irq = spi->irq;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static void ksz_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (!dev)
+ return;
+
+ ksz_switch_shutdown(dev);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id ksz_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz8463",
+ .data = &ksz_switch_chips[KSZ8463]
+ },
+ {
+ .compatible = "microchip,ksz8765",
+ .data = &ksz_switch_chips[KSZ8765]
+ },
+ {
+ .compatible = "microchip,ksz8794",
+ .data = &ksz_switch_chips[KSZ8794]
+ },
+ {
+ .compatible = "microchip,ksz8795",
+ .data = &ksz_switch_chips[KSZ8795]
+ },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8864",
+ .data = &ksz_switch_chips[KSZ8864]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ88X3]
+ },
+ {
+ .compatible = "microchip,ksz8895",
+ .data = &ksz_switch_chips[KSZ8895]
+ },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9563]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
+ {
+ .compatible = "microchip,lan9646",
+ .data = &ksz_switch_chips[LAN9646]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
+
+static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8463" },
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8864" },
+ { "ksz8873" },
+ { "ksz8895" },
+ { "ksz9477" },
+ { "ksz9896" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz8567" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
+ { "lan9646" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
+static struct spi_driver ksz_spi_driver = {
+ .driver = {
+ .name = "ksz-switch",
+ .of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
+ },
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
+};
+
+module_spi_driver(ksz_spi_driver);
+
+MODULE_ALIAS("spi:lan937x");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000000..df13ebbd356f
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip lan937x dev ops headers
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio);
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000000..5a1496fff445
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "ksz9477.h"
+#include "lan937x.h"
+
+/* marker for ports without built-in PHY */
+#define LAN937X_NO_PHY U8_MAX
+
+/*
+ * lan9370_phy_addr - Mapping of LAN9370 switch ports to PHY addresses.
+ *
+ * Each entry corresponds to a specific port on the LAN9370 switch,
+ * where ports 1-4 are connected to integrated 100BASE-T1 PHYs, and
+ * Port 5 is connected to an RGMII interface without a PHY. The values
+ * are based on the documentation (DS00003108E, section 3.3).
+ */
+static const u8 lan9370_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 6, /* Port 4, T1 AFE4 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+};
+
+/*
+ * lan9371_phy_addr - Mapping of LAN9371 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003109E, section 3.3).
+ */
+static const u8 lan9371_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+};
+
+/*
+ * lan9372_phy_addr - Mapping of LAN9372 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9372_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 8, /* Port 4, TX PHY */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9373_phy_addr - Mapping of LAN9373 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9373_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = LAN937X_NO_PHY, /* Port 4, SGMII */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+/*
+ * lan9374_phy_addr - Mapping of LAN9374 switch ports to PHY addresses.
+ *
+ * The values are based on the documentation (DS00003110F, section 3.3).
+ */
+static const u8 lan9374_phy_addr[] = {
+ [0] = 2, /* Port 1, T1 AFE0 */
+ [1] = 3, /* Port 2, T1 AFE1 */
+ [2] = 5, /* Port 3, T1 AFE3 */
+ [3] = 7, /* Port 4, T1 AFE5 */
+ [4] = LAN937X_NO_PHY, /* Port 5, RGMII 2 */
+ [5] = LAN937X_NO_PHY, /* Port 6, RGMII 1 */
+ [6] = 6, /* Port 7, T1 AFE4 */
+ [7] = 4, /* Port 8, T1 AFE2 */
+};
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+/**
+ * lan937x_create_phy_addr_map - Create port-to-PHY address map for MDIO bus.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function sets up the PHY address mapping for the LAN937x switches,
+ * which support two access modes for internal PHYs:
+ * 1. **SPI Access**: A straightforward one-to-one port-to-PHY address
+ * mapping is applied.
+ * 2. **MDIO Access**: The PHY address mapping varies based on chip variant
+ * and strap configuration. An offset is calculated based on strap settings
+ * to ensure correct PHY addresses are assigned. The offset calculation logic
+ * is based on Microchip's Article Number 000015828, available at:
+ * https://microchip.my.site.com/s/article/LAN9374-Virtual-PHY-PHY-Address-Mapping
+ *
+ * The function first checks if side MDIO access is disabled, in which case a
+ * simple direct mapping (port number = PHY address) is applied. If side MDIO
+ * access is enabled, it reads the strap configuration to determine the correct
+ * offset for PHY addresses.
+ *
+ * The appropriate mapping table is selected based on the chip ID, and the
+ * `phy_addr_map` is populated with the correct addresses for each port. Any
+ * port with no PHY is assigned a `LAN937X_NO_PHY` marker.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_create_phy_addr_map(struct ksz_device *dev, bool side_mdio)
+{
+ const u8 *phy_addr_map;
+ u32 strap_val;
+ u8 offset = 0;
+ size_t size;
+ int ret, i;
+
+ if (!side_mdio) {
+ /* simple direct mapping */
+ for (i = 0; i < dev->info->port_cnt; i++)
+ dev->phy_addr_map[i] = i;
+
+ return 0;
+ }
+
+ ret = ksz_read32(dev, REG_SW_CFG_STRAP_VAL, &strap_val);
+ if (ret < 0)
+ return ret;
+
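+ /* Offset applied to the base PHY addresses, per strap combination
+ * (see the Microchip article referenced above):
+ * CASCADE_ID = 0, VPHY_ADD = 0 -> 0
+ * CASCADE_ID = 0, VPHY_ADD = 1 -> 7
+ * CASCADE_ID = 1, VPHY_ADD = 0 -> 15
+ * CASCADE_ID = 1, VPHY_ADD = 1 -> 22
+ */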
+ if (!(strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 0;
+ else if (!(strap_val & SW_CASCADE_ID_CFG) && (strap_val & SW_VPHY_ADD_CFG))
+ offset = 7;
+ else if ((strap_val & SW_CASCADE_ID_CFG) && !(strap_val & SW_VPHY_ADD_CFG))
+ offset = 15;
+ else
+ offset = 22;
+
+ switch (dev->info->chip_id) {
+ case LAN9370_CHIP_ID:
+ phy_addr_map = lan9370_phy_addr;
+ size = ARRAY_SIZE(lan9370_phy_addr);
+ break;
+ case LAN9371_CHIP_ID:
+ phy_addr_map = lan9371_phy_addr;
+ size = ARRAY_SIZE(lan9371_phy_addr);
+ break;
+ case LAN9372_CHIP_ID:
+ phy_addr_map = lan9372_phy_addr;
+ size = ARRAY_SIZE(lan9372_phy_addr);
+ break;
+ case LAN9373_CHIP_ID:
+ phy_addr_map = lan9373_phy_addr;
+ size = ARRAY_SIZE(lan9373_phy_addr);
+ break;
+ case LAN9374_CHIP_ID:
+ phy_addr_map = lan9374_phy_addr;
+ size = ARRAY_SIZE(lan9374_phy_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size < dev->info->port_cnt)
+ return -EINVAL;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (phy_addr_map[i] == LAN937X_NO_PHY)
+ dev->phy_addr_map[i] = phy_addr_map[i];
+ else
+ dev->phy_addr_map[i] = phy_addr_map[i] + offset;
+ }
+
+ return 0;
+}
+
+/**
+ * lan937x_mdio_bus_preinit - Pre-initialize MDIO bus for accessing PHYs.
+ * @dev: Pointer to device structure.
+ * @side_mdio: Boolean indicating if the PHYs are accessed over a side MDIO bus.
+ *
+ * This function configures the LAN937x switch for PHY access either through
+ * SPI or the side MDIO bus, unlocking the necessary registers for each access
+ * mode.
+ *
+ * Operation Modes:
+ * 1. **SPI Access**: Enables SPI indirect access to address clock domain
+ * crossing issues when SPI is used for PHY access.
+ * 2. **MDIO Access**: Grants access to internal PHYs over the side MDIO bus,
+ * required when using the MDIO bus for PHY management.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int lan937x_mdio_bus_preinit(struct ksz_device *dev, bool side_mdio)
+{
+ u16 data16;
+ int ret;
+
+ /* Unlock access to the PHYs, needed for SPI and side MDIO access */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ goto print_error;
+
+ if (side_mdio)
+ /* Allow access to internal PHYs over MDIO bus */
+ data16 = VPHY_MDIO_INTERNAL_ENABLE;
+ else
+ /* Enable SPI indirect access to work around a clock domain
+ * crossing issue
+ */
+ data16 = VPHY_SPI_INDIRECT_ENABLE;
+
+ ret = ksz_rmw16(dev, REG_VPHY_SPECIAL_CTRL__2,
+ VPHY_SPI_INDIRECT_ENABLE | VPHY_MDIO_INTERNAL_ENABLE,
+ data16);
+
+print_error:
+ if (ret < 0)
+ dev_err(dev->dev, "failed to preinit the MDIO bus\n");
+
+ return ret;
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ if (is_lan937x_tx_phy(dev, addr))
+ addr_base = REG_PORT_TX_PHY_CTRL_BASE;
+
+ /* Get the register address for the logical port; each PHY register occupies a 4-byte stride */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Ports without an internal PHY read back 0xffff, mimicking an absent PHY on an MDIO bus */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write Read and Busy bit to start the transaction */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(ksz_regmap_16(dev), REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ return lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ return lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+ /* enable tag tail for host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* Enable the Port Queue split */
+ ksz9477_port_queue_split(dev, port);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
+ masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL],
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ret = ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+ if (ret) {
+ dev_err(ds->dev, "failed to update mtu for port %d\n", port);
+ return ret;
+ }
+
+ return 0;
+}
+
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u8 data, mult, value8;
+ bool in_msec = false;
+ u32 max_val, value;
+ u32 secs = msecs;
+ int ret;
+
+#define MAX_TIMER_VAL ((1 << 20) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and a 20-bit seconds
+ * value; neither of them may be zero. The maximum timer is then
+ * 7 * 1048575 = 7340025 seconds. As this value is too large for
+ * practical use, the count can instead be interpreted as milliseconds,
+ * making the maximum timer 7340 seconds with finer control. This
+ * allows for a maximum of 122 minutes, compared to 29 minutes in the
+ * KSZ9477 switch.
+ */
+ if (msecs % 1000)
+ in_msec = true;
+ else
+ secs /= 1000;
+ if (!secs)
+ secs = 1;
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ /* Configure how to interpret the number value. */
+ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
+ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether the multiplier needs to be updated. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value8);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register; for
+ * instance, the hardware default uses multiplier 4 with a
+ * 75 second period to get 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
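+ /* Example: for the default 300 s ageing time with the reset-default
+ * multiplier of 4, max_val becomes 75, data stays 4 and a period of
+ * 75 is programmed, giving 4 * 75 = 300 seconds.
+ */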
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value8 &= ~SW_AGE_CNT_M;
+ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
+ if (ret < 0)
+ return ret;
+ }
+
+ secs = DIV_ROUND_UP(secs, data);
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
+
+static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
+ u16 reg, u8 val)
+{
+ u16 data16;
+
+ ksz_pread16(dev, port, reg, &data16);
+
+ /* Update tune Adjust */
+ data16 &= ~PORT_TUNE_ADJ;
+ data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
+ ksz_pwrite16(dev, port, reg, data16);
+
+ /* write DLL reset to take effect */
+ data16 |= PORT_DLL_RESET;
+ ksz_pwrite16(dev, port, reg, data16);
+}
+
+static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ /* Apply a different tuning value per port, based on
+ * characterization results
+ */
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
+ RGMII_2_TX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
+}
+
+static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
+ RGMII_2_RX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* MII/RMII/RGMII ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ } else if (is_lan937x_tx_phy(dev, port)) {
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10;
+ }
+}
+
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (p->rgmii_tx_val) {
+ lan937x_set_rgmii_tx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
+ port);
+ }
+
+ if (p->rgmii_rx_val) {
+ lan937x_set_rgmii_rx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
+ port);
+ }
+}
+
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ return ksz_pwrite32(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* VLAN awareness is a global setting; mixed VLAN filtering
+ * across ports is not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive back off for half duplex & UNH mode */
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_0, (SW_PAUSE_UNH_MODE |
+ SW_NEW_BACKOFF |
+ SW_AGGR_BACKOFF), true);
+ if (ret < 0)
+ return ret;
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ if (ret < 0)
+ return ret;
+
+ /* enable global MIB counter freeze function */
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable CLK125 & CLK25, 1: disable, 0: enable */
+ ret = lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ if (ret < 0)
+ return ret;
+
+ /* Disable global VPHY support. Related to CPU interface only? */
+ return ksz_rmw32(dev, REG_SW_CFG_STRAP_OVR, SW_VPHY_DISABLE,
+ SW_VPHY_DISABLE);
+}
+
+void lan937x_teardown(struct dsa_switch *ds)
+{
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000000..72042fd64e5b
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
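+/* Per-port register blocks are 0x1000 bytes apart; port 0 starts at 0x1000 */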
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+#define REG_SW_CFG_STRAP_VAL 0x0200
+#define SW_CASCADE_ID_CFG BIT(15)
+#define SW_VPHY_ADD_CFG BIT(0)
+
+/* 2 - PHY Control */
+#define REG_SW_CFG_STRAP_OVR 0x0214
+#define SW_VPHY_DISABLE BIT(31)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M GENMASK(5, 3)
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_AGE_CNT_IN_MICROSEC BIT(7)
+
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+#define REG_PORT_TX_PHY_CTRL_BASE 0x0280
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_MII_SEL_EDGE BIT(5)
+
+#define REG_PORT_XMII_CTRL_4 0x0304
+#define REG_PORT_XMII_CTRL_5 0x0306
+
+#define PORT_DLL_RESET BIT(15)
+#define PORT_TUNE_ADJ GENMASK(13, 7)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+/* 9 - Shaping */
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091C
+
+/* The port number as per the datasheet */
+#define RGMII_2_PORT_NUM 5
+#define RGMII_1_PORT_NUM 6
+
+#define LAN937X_RGMII_2_PORT (RGMII_2_PORT_NUM - 1)
+#define LAN937X_RGMII_1_PORT (RGMII_1_PORT_NUM - 1)
+
+#define RGMII_1_TX_DELAY_2NS 2
+#define RGMII_2_TX_DELAY_2NS 0
+#define RGMII_1_RX_DELAY_2NS 0x1B
+#define RGMII_2_RX_DELAY_2NS 0x14
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
new file mode 100644
index 000000000000..0286a6cecb6f
--- /dev/null
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/gpio/consumer.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <net/dsa.h>
+
+#include "mt7530.h"
+
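+/* MT7530 registers are 32 bits wide but sit behind a 16-bit MDIO bus,
+ * so one access takes three MDIO cycles: select the page (bits 15:6 of
+ * the register address) through register 0x1f, then transfer the low
+ * and high 16-bit halves through registers (reg >> 2) & 0xf and 0x10.
+ */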
+static int
+mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+ lo = val & 0xffff;
+ hi = val >> 16;
+
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
+ if (ret < 0)
+ return ret;
+
+ ret = bus->write(bus, priv->mdiodev->addr, r, lo);
+ if (ret < 0)
+ return ret;
+
+ ret = bus->write(bus, priv->mdiodev->addr, 0x10, hi);
+ return ret;
+}
+
+static int
+mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
+ if (ret < 0)
+ return ret;
+
+ lo = bus->read(bus, priv->mdiodev->addr, r);
+ hi = bus->read(bus, priv->mdiodev->addr, 0x10);
+
+ *val = (hi << 16) | (lo & 0xffff);
+
+ return 0;
+}
+
+static void
+mt7530_mdio_regmap_lock(void *mdio_lock)
+{
+ mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mdio_regmap_unlock(void *mdio_lock)
+{
+ mutex_unlock(mdio_lock);
+}
+
+static const struct regmap_bus mt7530_regmap_bus = {
+ .reg_write = mt7530_regmap_write,
+ .reg_read = mt7530_regmap_read,
+};
+
+static int
+mt7531_create_sgmii(struct mt7530_priv *priv)
+{
+ struct regmap_config *mt7531_pcs_config[2] = {};
+ struct phylink_pcs *pcs;
+ struct regmap *regmap;
+ int i, ret = 0;
+
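+ /* Port 6 always has an SGMII PCS; port 5 gets one only when it is
+ * strapped for SGMII (p5_sgmii).
+ */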
+ for (i = priv->p5_sgmii ? 0 : 1; i < 2; i++) {
+ mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
+ sizeof(struct regmap_config),
+ GFP_KERNEL);
+ if (!mt7531_pcs_config[i]) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ mt7531_pcs_config[i]->name = i ? "port6" : "port5";
+ mt7531_pcs_config[i]->reg_bits = 16;
+ mt7531_pcs_config[i]->val_bits = 32;
+ mt7531_pcs_config[i]->reg_stride = 4;
+ mt7531_pcs_config[i]->reg_base = MT7531_SGMII_REG_BASE(5 + i);
+ mt7531_pcs_config[i]->max_register = 0x17c;
+ mt7531_pcs_config[i]->lock = mt7530_mdio_regmap_lock;
+ mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
+ mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
+
+ regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
+ mt7531_pcs_config[i]);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ break;
+ }
+ pcs = mtk_pcs_lynxi_create(priv->dev, regmap,
+ MT7531_PHYA_CTRL_SIGNAL3, 0);
+ if (!pcs) {
+ ret = -ENXIO;
+ break;
+ }
+ priv->ports[5 + i].sgmii_pcs = pcs;
+ }
+
+ if (ret && i)
+ mtk_pcs_lynxi_destroy(priv->ports[5].sgmii_pcs);
+
+ return ret;
+}
+
+static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7530_of_match);
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+ .disable_locking = true,
+};
+
+static int
+mt7530_probe(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv;
+ struct device_node *dn;
+ int ret;
+
+ dn = mdiodev->dev.of_node;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->mdiodev = mdiodev;
+
+ ret = mt7530_probe_common(priv);
+ if (ret)
+ return ret;
+
+ /* Use the mediatek,mcm property to distinguish the hardware type, as
+ * it slightly changes the power-on sequence. Without MCM, the switch
+ * works as a standalone chip and a GPIO pin is used to complete the
+ * reset; with MCM, the reset is instead driven through memory-mapped
+ * registers provided via syscon.
+ */
+ priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
+ if (priv->mcm) {
+ dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
+
+ priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+ } else {
+ priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->reset);
+ }
+ }
+
+ if (priv->id == ID_MT7530) {
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+ if (IS_ERR(priv->core_pwr))
+ return PTR_ERR(priv->core_pwr);
+
+ priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+ if (IS_ERR(priv->io_pwr))
+ return PTR_ERR(priv->io_pwr);
+ }
+
+ priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
+ &regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ if (priv->id == ID_MT7531)
+ priv->create_sgmii = mt7531_create_sgmii;
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+mt7530_remove(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int ret = 0, i;
+
+ if (!priv)
+ return;
+
+ ret = regulator_disable(priv->core_pwr);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "Failed to disable core power: %d\n", ret);
+
+ ret = regulator_disable(priv->io_pwr);
+ if (ret < 0)
+ dev_err(priv->dev, "Failed to disable io pwr: %d\n",
+ ret);
+
+ mt7530_remove_common(priv);
+
+ for (i = 0; i < 2; ++i)
+ mtk_pcs_lynxi_destroy(priv->ports[5 + i].sgmii_pcs);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static struct mdio_driver mt7530_mdio_driver = {
+ .probe = mt7530_probe,
+ .remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
+ .mdiodrv.driver = {
+ .name = "mt7530-mdio",
+ .of_match_table = mt7530_of_match,
+ },
+};
+
+mdio_module_driver(mt7530_mdio_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MDIO)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
new file mode 100644
index 000000000000..1dc8b93fb51a
--- /dev/null
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#include "mt7530.h"
+
+static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,an7583-switch", .data = &mt753x_table[ID_AN7583], },
+ { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
+ { .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7988_of_match);
+
+static const struct regmap_config sw_regmap_config = {
+ .name = "switch",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MT7530_CREV,
+};
+
+static int
+mt7988_probe(struct platform_device *pdev)
+{
+ struct mt7530_priv *priv;
+ void __iomem *base_addr;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = NULL;
+ priv->dev = &pdev->dev;
+
+ ret = mt7530_probe_common(priv);
+ if (ret)
+ return ret;
+
+ priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base_addr)) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ return -ENXIO;
+ }
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr,
+ &sw_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void mt7988_remove(struct platform_device *pdev)
+{
+ struct mt7530_priv *priv = platform_get_drvdata(pdev);
+
+ if (priv)
+ mt7530_remove_common(priv);
+}
+
+static void mt7988_shutdown(struct platform_device *pdev)
+{
+ struct mt7530_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static struct platform_driver mt7988_platform_driver = {
+ .probe = mt7988_probe,
+ .remove = mt7988_remove,
+ .shutdown = mt7988_shutdown,
+ .driver = {
+ .name = "mt7530-mmio",
+ .of_match_table = mt7988_of_match,
+ },
+};
+module_platform_driver(mt7988_platform_driver);
+
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MMIO)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 9890672a206d..b9423389c2ef 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -21,147 +21,118 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <net/dsa.h>
+#include <net/pkt_cls.h>
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
- MIB_DESC(1, 0x00, "TxDrop"),
- MIB_DESC(1, 0x04, "TxCrcErr"),
- MIB_DESC(1, 0x08, "TxUnicast"),
- MIB_DESC(1, 0x0c, "TxMulticast"),
- MIB_DESC(1, 0x10, "TxBroadcast"),
- MIB_DESC(1, 0x14, "TxCollision"),
- MIB_DESC(1, 0x18, "TxSingleCollision"),
- MIB_DESC(1, 0x1c, "TxMultipleCollision"),
- MIB_DESC(1, 0x20, "TxDeferred"),
- MIB_DESC(1, 0x24, "TxLateCollision"),
- MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
- MIB_DESC(1, 0x2c, "TxPause"),
- MIB_DESC(1, 0x30, "TxPktSz64"),
- MIB_DESC(1, 0x34, "TxPktSz65To127"),
- MIB_DESC(1, 0x38, "TxPktSz128To255"),
- MIB_DESC(1, 0x3c, "TxPktSz256To511"),
- MIB_DESC(1, 0x40, "TxPktSz512To1023"),
- MIB_DESC(1, 0x44, "Tx1024ToMax"),
- MIB_DESC(2, 0x48, "TxBytes"),
- MIB_DESC(1, 0x60, "RxDrop"),
- MIB_DESC(1, 0x64, "RxFiltering"),
- MIB_DESC(1, 0x68, "RxUnicast"),
- MIB_DESC(1, 0x6c, "RxMulticast"),
- MIB_DESC(1, 0x70, "RxBroadcast"),
- MIB_DESC(1, 0x74, "RxAlignErr"),
- MIB_DESC(1, 0x78, "RxCrcErr"),
- MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
- MIB_DESC(1, 0x80, "RxFragErr"),
- MIB_DESC(1, 0x84, "RxOverSzErr"),
- MIB_DESC(1, 0x88, "RxJabberErr"),
- MIB_DESC(1, 0x8c, "RxPause"),
- MIB_DESC(1, 0x90, "RxPktSz64"),
- MIB_DESC(1, 0x94, "RxPktSz65To127"),
- MIB_DESC(1, 0x98, "RxPktSz128To255"),
- MIB_DESC(1, 0x9c, "RxPktSz256To511"),
- MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
- MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
- MIB_DESC(2, 0xa8, "RxBytes"),
- MIB_DESC(1, 0xb0, "RxCtrlDrop"),
- MIB_DESC(1, 0xb4, "RxIngressDrop"),
- MIB_DESC(1, 0xb8, "RxArlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_DROP, "TxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_CRC_ERR, "TxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_COLLISION, "TxCollision"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_DROP, "RxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_FILTERING, "RxFiltering"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CRC_ERR, "RxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CTRL_DROP, "RxCtrlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_INGRESS_DROP, "RxIngressDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_ARL_DROP, "RxArlDrop"),
};
-/* Since phy_device has not yet been created and
- * phy_{read,write}_mmd_indirect is not available, we provide our own
- * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
- * to complete this function.
- */
-static int
-core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
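+/* The MMIO variant of the switch (see mt7530-mmio.c) has no MDIO bus and
+ * leaves priv->bus NULL, so the MDIO bus lock is only taken when a bus
+ * actually exists.
+ */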
+static void
+mt7530_mutex_lock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mutex_unlock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
struct mii_bus *bus = priv->bus;
- int value, ret;
+ int ret;
+
+ mt7530_mutex_lock(priv);
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
- /* Read the content of the MMD's selected register */
- value = bus->read(bus, 0, MII_MMD_DATA);
-
- return value;
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
- dev_err(&bus->dev, "failed to read mmd register\n");
+ if (ret < 0)
+ dev_err(&bus->dev, "failed to write mmd register\n");
- return ret;
+ mt7530_mutex_unlock(priv);
}
-static int
-core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
- int devad, u32 data)
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
struct mii_bus *bus = priv->bus;
+ u32 val;
int ret;
+ mt7530_mutex_lock(priv);
+
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
+ /* Read the content of the MMD's selected register */
+ val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA);
+ val &= ~mask;
+ val |= set;
/* Write the data into MMD's selected register */
- ret = bus->write(bus, 0, MII_MMD_DATA, data);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
if (ret < 0)
- dev_err(&bus->dev,
- "failed to write mmd register\n");
- return ret;
-}
-
-static void
-core_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- struct mii_bus *bus = priv->bus;
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+ dev_err(&bus->dev, "failed to write mmd register\n");
- mutex_unlock(&bus->mdio_lock);
-}
-
-static void
-core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
-{
- struct mii_bus *bus = priv->bus;
- u32 val;
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
- val &= ~mask;
- val |= set;
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
-
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static void
@@ -179,66 +150,42 @@ core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
static int
mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
- struct mii_bus *bus = priv->bus;
- u16 page, r, lo, hi;
int ret;
- page = (reg >> 6) & 0x3ff;
- r = (reg >> 2) & 0xf;
- lo = val & 0xffff;
- hi = val >> 16;
-
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
- if (ret < 0)
- goto err;
+ ret = regmap_write(priv->regmap, reg, val);
- ret = bus->write(bus, 0x1f, r, lo);
if (ret < 0)
- goto err;
-
- ret = bus->write(bus, 0x1f, 0x10, hi);
-err:
- if (ret < 0)
- dev_err(&bus->dev,
+ dev_err(priv->dev,
"failed to write mt7530 register\n");
+
return ret;
}
static u32
mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
{
- struct mii_bus *bus = priv->bus;
- u16 page, r, lo, hi;
int ret;
+ u32 val;
- page = (reg >> 6) & 0x3ff;
- r = (reg >> 2) & 0xf;
-
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
- if (ret < 0) {
- dev_err(&bus->dev,
+ ret = regmap_read(priv->regmap, reg, &val);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ dev_err(priv->dev,
"failed to read mt7530 register\n");
- return ret;
+ return 0;
}
- lo = bus->read(bus, 0x1f, r);
- hi = bus->read(bus, 0x1f, 0x10);
-
- return (hi << 16) | (lo & 0xffff);
+ return val;
}
static void
mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
- struct mii_bus *bus = priv->bus;
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
mt7530_mii_write(priv, reg, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static u32
@@ -250,14 +197,13 @@ _mt7530_unlocked_read(struct mt7530_dummy_poll *p)
static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
- struct mii_bus *bus = p->priv->bus;
u32 val;
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(p->priv);
val = mt7530_mii_read(p->priv, p->reg);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(p->priv);
return val;
}
@@ -275,23 +221,17 @@ static void
mt7530_rmw(struct mt7530_priv *priv, u32 reg,
u32 mask, u32 set)
{
- struct mii_bus *bus = priv->bus;
- u32 val;
+ mt7530_mutex_lock(priv);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ regmap_update_bits(priv->regmap, reg, mask, set);
- val = mt7530_mii_read(priv, reg);
- val &= ~mask;
- val |= set;
- mt7530_mii_write(priv, reg, val);
-
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static void
mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
- mt7530_rmw(priv, reg, 0, val);
+ mt7530_rmw(priv, reg, val, val);
}
static void
@@ -388,67 +328,12 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
-/* Setup TX circuit including relevant PAD and driving */
-static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
{
- struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i, xtal;
-
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
- if (xtal == HWTRAP_XTAL_20MHZ) {
- dev_err(priv->dev,
- "%s: MT7530 with a 20MHz XTAL is not supported!\n",
- __func__);
- return -EINVAL;
- }
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- trgint = 0;
- /* PLL frequency: 125MHz */
- ncpo1 = 0x0c80;
- break;
- case PHY_INTERFACE_MODE_TRGMII:
- trgint = 1;
- if (priv->id == ID_MT7621) {
- /* PLL frequency: 150MHz: 1.2GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0780;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x0a00;
- } else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x1400;
- }
- break;
- default:
- dev_err(priv->dev, "xMII interface %d not supported\n",
- interface);
- return -EINVAL;
- }
-
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
-
- mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
- P6_INTF_MODE(trgint));
-
- /* Lower Tx Driving for TRGMII path */
- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- /* Disable MT7530 core and TRGMII Tx clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- /* Setup core clock for MT7530 */
/* Disable PLL */
core_write(priv, CORE_GSWPLL_GRP1, 0);
@@ -463,61 +348,97 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
RG_GSWPLL_POSDIV_200M(2) |
RG_GSWPLL_FBKDIV_200M(32));
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-
- /* Enable MT7530 core and TRGMII Tx clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- if (!trgint)
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7530_rmw(priv, MT7530_TRGMII_RD(i),
- RD_TAP_MASK, RD_TAP(16));
- return 0;
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
+/* If port 6 is available as a CPU port, always prefer it as the default CPU
+ * port; otherwise express no preference.
+ */
+static struct dsa_port *
+mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
- u32 val;
+ struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
- val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ if (dsa_port_is_cpu(cpu_dp))
+ return cpu_dp;
- return (val & PAD_DUAL_SGMII_EN) != 0;
+ return NULL;
}
-static int
-mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+/* Setup port 6 interface mode and TRGMII TX circuit */
+static void
+mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
+ u32 ncpo1, ssc_delta, xtal;
+
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+ if (interface == PHY_INTERFACE_MODE_RGMII) {
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(0));
+ return;
+ }
+
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
+
+ xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
+
+ if (xtal == MT7530_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
+
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == MT7530_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == MT7530_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == MT7530_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == MT7530_XTAL_25MHZ)
+ ncpo1 = 0x1400;
+ }
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
+ RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
+ RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+}
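+
+/* Worked example for mt7530_setup_port6() above: on MT7621 with a 40MHz
+ * crystal, ncpo1 = 0x0640 programs the LCDDS PCW for a 125MHz TRGMII Tx
+ * clock (1.0GBit); a 25MHz crystal needs ncpo1 = 0x0a00 for the same rate.
+ */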
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
+ enum mt7531_xtal_fsel xtal;
u32 top_sig;
u32 hwstrap;
- u32 xtal;
u32 val;
- if (mt7531_dual_sgmii_supported(priv))
- return 0;
-
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
- hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ hwstrap = mt7530_read(priv, MT753X_TRAP);
if ((val & CHIP_REV_M) > 0)
- xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
- HWTRAP_XTAL_FSEL_25MHZ;
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
+ MT7531_XTAL_FSEL_25MHZ;
else
- xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+ xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
+ MT7531_XTAL_FSEL_40MHZ;
/* Step 1 : Disable MT7531 COREPLL */
val = mt7530_read(priv, MT7531_PLLGP_EN);
@@ -546,13 +467,13 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
usleep_range(25, 35);
switch (xtal) {
- case HWTRAP_XTAL_FSEL_25MHZ:
+ case MT7531_XTAL_FSEL_25MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- case HWTRAP_XTAL_FSEL_40MHZ:
+ case MT7531_XTAL_FSEL_40MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
@@ -587,8 +508,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -600,29 +519,40 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
+static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
return mdiobus_read_nested(priv->bus, port, regnum);
}
-static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 val)
+static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
+ u16 val)
{
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
+ int devad, int regnum)
+{
+ return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
+}
+
+static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val)
+{
+ return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
+}
+
static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
int regnum)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
u32 reg, val;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -655,23 +585,22 @@ mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
- int regnum, u32 data)
+ int regnum, u16 data)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
u32 val, reg;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -703,7 +632,7 @@ mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
}
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
@@ -711,14 +640,13 @@ out:
static int
mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
int ret;
u32 val;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -741,7 +669,7 @@ mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
@@ -750,14 +678,13 @@ static int
mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
u16 data)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
int ret;
u32 reg;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
!(reg & MT7531_PHY_ACS_ST), 20, 100000);
@@ -779,61 +706,42 @@ mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
}
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
static int
-mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
+mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_read(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK);
- } else {
- ret = mt7531_ind_c22_phy_read(priv, port, regnum);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c22(priv, port, regnum);
}
static int
-mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 data)
+mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_write(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK,
- data);
- } else {
- ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c45(priv, port, devad, regnum);
}
static int
-mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_read(priv, port, regnum);
+ return priv->info->phy_write_c22(priv, port, regnum, val);
}
static int
-mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
+ u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_write(priv, port, regnum, val);
+ return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
static void
@@ -846,8 +754,22 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
return;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
- strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
- ETH_GSTRING_LEN);
+ ethtool_puts(&data, mt7530_mib[i].name);
+}
+
+static void
+mt7530_read_port_stats(struct mt7530_priv *priv, int port,
+ u32 offset, u8 size, uint64_t *data)
+{
+ u32 val, reg = MT7530_PORT_MIB_COUNTER(port) + offset;
+
+ val = mt7530_read(priv, reg);
+ *data = val;
+
+ if (size == 2) {
+ val = mt7530_read(priv, reg + 4);
+ *data |= (u64)val << 32;
+ }
}
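+
+/* Usage sketch for mt7530_read_port_stats() above: a 64-bit MIB counter such
+ * as TxBytes spans two consecutive 32-bit registers, low word first, so
+ * callers pass size == 2, e.g.:
+ *
+ *	u64 tx_bytes;
+ *
+ *	mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ *			       &tx_bytes);
+ */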
static void
@@ -856,18 +778,13 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ int i;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
- reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = mt7530_read(priv, reg);
- if (mib->size == 2) {
- hi = mt7530_read(priv, reg + 4);
- data[i] |= hi << 32;
- }
+ mt7530_read_port_stats(priv, port, mib->offset, mib->size,
+ data + i);
}
}
@@ -880,6 +797,172 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
+static void mt7530_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+	/* The MIB counters don't provide a FramesTransmittedOK counter but
+	 * instead count Unicast, Broadcast and Multicast frames separately.
+	 * To simulate a global frame counter, read the Unicast counter here
+	 * and add the Multicast and Broadcast counters below.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &mac_stats->FramesTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_SINGLE_COLLISION, 1,
+ &mac_stats->SingleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTIPLE_COLLISION, 1,
+ &mac_stats->MultipleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &mac_stats->FramesReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &mac_stats->OctetsTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_ALIGN_ERR, 1,
+ &mac_stats->AlignmentErrors);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DEFERRED, 1,
+ &mac_stats->FramesWithDeferredXmissions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_LATE_COLLISION, 1,
+ &mac_stats->LateCollisions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION, 1,
+ &mac_stats->FramesAbortedDueToXSColls);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &mac_stats->OctetsReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &mac_stats->MulticastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->MulticastFramesXmittedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->BroadcastFramesXmittedOK;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &mac_stats->MulticastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->MulticastFramesReceivedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->BroadcastFramesReceivedOK;
+}
+
+static const struct ethtool_rmon_hist_range mt7530_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, MT7530_MAX_MTU },
+ {}
+};
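+
+/* The six ranges above correspond one-to-one with the RX/TX size-bucket MIB
+ * counters (64, 65-127, ..., 1024-max) read into hist[] and hist_tx[] below.
+ */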
+
+static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNDER_SIZE_ERR, 1,
+ &rmon_stats->undersize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_OVER_SZ_ERR, 1,
+ &rmon_stats->oversize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_FRAG_ERR, 1,
+ &rmon_stats->fragments);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_JABBER_ERR, 1,
+ &rmon_stats->jabbers);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_64, 1,
+ &rmon_stats->hist[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist[5]);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_64, 1,
+ &rmon_stats->hist_tx[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist_tx[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist_tx[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist_tx[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist_tx[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist_tx[5]);
+
+ *ranges = mt7530_rmon_ranges;
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ uint64_t data;
+
+	/* The MIB counters don't provide global RX/TX packet counters but
+	 * instead count Unicast, Broadcast and Multicast frames separately.
+	 * To simulate global counters, read the Unicast counters here and
+	 * add the Multicast and Broadcast counters below.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &storage->rx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &storage->multicast);
+ storage->rx_packets += storage->multicast;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &data);
+ storage->rx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &storage->tx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &data);
+ storage->tx_packets += data;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &data);
+ storage->tx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &storage->rx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &storage->tx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_DROP, 1,
+ &storage->rx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DROP, 1,
+ &storage->tx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_CRC_ERR, 1,
+ &storage->rx_crc_errors);
+}
+
+static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesTransmitted);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesReceived);
+}
+
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
@@ -920,6 +1003,18 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
+static const char *mt7530_p5_mode_str(unsigned int mode)
+{
+ switch (mode) {
+ case MUX_PHY_P0:
+ return "MUX PHY P0";
+ case MUX_PHY_P4:
+ return "MUX PHY P4";
+ default:
+ return "GMAC5";
+ }
+}
+
static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
@@ -928,39 +1023,31 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_lock(&priv->reg_mutex);
- val = mt7530_read(priv, MT7530_MHWTRAP);
+ val = mt7530_read(priv, MT753X_MTRAP);
- val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
- val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
+ val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
- switch (priv->p5_intf_sel) {
- case P5_INTF_SEL_PHY_P0:
- /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
- val |= MHWTRAP_PHY0_SEL;
+ switch (priv->p5_mode) {
+ /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
+ case MUX_PHY_P0:
+ val |= MT7530_P5_PHY0_SEL;
fallthrough;
- case P5_INTF_SEL_PHY_P4:
- /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
- val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+ /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
+ case MUX_PHY_P4:
/* Setup the MAC by default for the cpu port */
- mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
- break;
- case P5_INTF_SEL_GMAC5:
- /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
- val &= ~MHWTRAP_P5_DIS;
- break;
- case P5_DISABLED:
- interface = PHY_INTERFACE_MODE_NA;
+ mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
break;
+
+ /* GMAC5: P5 -> SoC MAC or external PHY */
default:
- dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
- priv->p5_intf_sel);
- goto unlock_exit;
+ val |= MT7530_P5_MAC_SEL;
+ break;
}
/* Setup RGMII settings */
if (phy_interface_mode_is_rgmii(interface)) {
- val |= MHWTRAP_P5_RGMII_MODE;
+ val |= MT7530_P5_RGMII_MODE;
/* P5 RGMII RX Clock Control: delay setting for 1000M */
mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
@@ -980,41 +1067,239 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
- mt7530_write(priv, MT7530_MHWTRAP, val);
-
- dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
- val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+ mt7530_write(priv, MT753X_MTRAP, val);
- priv->p5_interface = interface;
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
+ mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
-unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
-static int
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
+ *
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is mainly decided by the ingress and
+ * egress rules, the FDB, and the spanning tree Port State functions of the
+ * Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified on the
+ * Nearest non-TPMR section, nor within the portion of the Nearest Customer
+ * Bridge section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it doesn't contain all of the
+ * reserved MAC addresses without affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformant as this switch
+ * intellectual property can allow, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
+ */
+static void
+mt753x_trap_frames(struct mt7530_priv *priv)
+{
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ * VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_BPC,
+ PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
+ BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
+ PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
+ BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
+
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC1,
+ R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
+ R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
+ R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
+ R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
+
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC2,
+ R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
+ R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
+ R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
+ R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
+}
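+
+/* Decoding the first write in mt753x_trap_frames() above, as a sketch: the
+ * BPC update forwards 802.1X PAE frames to the CPU port(s) only
+ * (PAE_PORT_FW(TO_CPU_FW_CPU_ONLY)), egresses them VLAN-untagged
+ * (PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED)), and PAE_BPDU_FR has them regarded as
+ * BPDUs so that they bypass the spanning tree Port State function of the
+ * Forwarding Process, as described in the comment above.
+ */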
+
+static void
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- int ret;
-
- /* Setup max capability of CPU port at first */
- if (priv->info->cpu_port_config) {
- ret = priv->info->cpu_port_config(ds, port);
- if (ret)
- return ret;
- }
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Disable flooding by default */
- mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
- BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
+ /* Enable flooding on the CPU port */
+ mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ UNU_FFP(BIT(port)));
- /* Set CPU port number */
- if (priv->id == ID_MT7621)
- mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+	/* Add the CPU port to the CPU port bitmap for MT7531 and the switches
+	 * on the MT7988, EN7581 and AN7583 SoCs. Trapped frames will be
+	 * forwarded to the CPU port that is affine to the inbound user port.
+	 */
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
+ priv->id == ID_EN7581 || priv->id == ID_AN7583)
+ mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
* the switch.
@@ -1025,14 +1310,13 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
-
- return 0;
}
static int
mt7530_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1041,14 +1325,25 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
* restore the port matrix if the port is the member of a certain
* bridge.
*/
- priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ if (dsa_port_is_user(dp)) {
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
+ }
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return 0;
+
+ if (port == 5)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
+
return 0;
}
@@ -1065,27 +1360,34 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
+
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
+ if (port == 5 && priv->p5_mode == GMAC5)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
}
static int
mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct mt7530_priv *priv = ds->priv;
- struct mii_bus *bus = priv->bus;
int length;
u32 val;
/* When a new MTU is set, DSA always set the CPU port's MTU to the
- * largest MTU of the slave ports. Because the switch only has a global
+ * largest MTU of the user ports. Because the switch only has a global
* RX length register, only allowing CPU port here is enough.
*/
if (!dsa_is_cpu_port(ds, port))
return 0;
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
val = mt7530_mii_read(priv, MT7530_GMACCR);
val &= ~MAX_RX_PKT_LEN_MASK;
@@ -1106,7 +1408,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
mt7530_mii_write(priv, MT7530_GMACCR, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return 0;
}
@@ -1146,13 +1448,62 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
FID_PST(FID_BRIDGED, stp_state));
}
+static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join) __must_hold(&priv->reg_mutex)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ struct mt7530_port *p = &priv->ports[port], *other_p;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
+ int other_port;
+ bool isolated;
+
+ dsa_switch_for_each_user_port(other_dp, priv->ds) {
+ other_port = other_dp->index;
+ other_p = &priv->ports[other_port];
+
+ if (dp == other_dp)
+ continue;
+
+		/* Add/remove this port to/from the port matrix of the other
+		 * ports in the same bridge. If the port is disabled, the port
+		 * matrix is kept but not applied until the port becomes
+		 * enabled.
+		 */
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
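+		/* Two ports in the same bridge forward to each other only if
+		 * they are not both marked isolated (BR_ISOLATED).
+		 */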
+ isolated = p->isolated && other_p->isolated;
+
+ if (join && !isolated) {
+ other_p->pm |= PCR_MATRIX(BIT(port));
+ port_bitmap |= BIT(other_port);
+ } else {
+ other_p->pm &= ~PCR_MATRIX(BIT(port));
+ }
+
+ if (other_p->enable)
+ mt7530_rmw(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX_MASK, other_p->pm);
+ }
+
+	/* Add/remove all the other ports to/from this port's matrix. For !join
+	 * (leaving the bridge), only the CPU port will remain in this port's
+	 * matrix.
+	 */
+ p->pm = PCR_MATRIX(port_bitmap);
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
+}
+
static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -1170,52 +1521,41 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
flags.val & BR_LEARNING ? 0 : SA_DIS);
if (flags.mask & BR_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
if (flags.mask & BR_MCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
if (flags.mask & BR_BCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_update_port_member(priv, port, bridge_dev, true);
+ mutex_unlock(&priv->reg_mutex);
+ }
+
return 0;
}
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
- u32 port_bitmap = BIT(MT7530_CPU_PORT);
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
- /* Add this port to the port matrix of the other ports in the
- * same bridge. If the port is disabled, port matrix is kept
- * and not being setup until the port becomes enabled.
- */
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_set(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm |= PCR_MATRIX(BIT(port));
-
- port_bitmap |= BIT(i);
- }
- }
-
- /* Add the all other ports to this port matrix. */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
- priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+ mt7530_update_port_member(priv, port, bridge.dev, true);
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
@@ -1236,7 +1576,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
/* This is called after .port_bridge_leave when leaving a VLAN-aware
* bridge. Don't set standalone ports to fallback mode.
*/
- if (dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
@@ -1250,7 +1590,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
@@ -1262,9 +1602,12 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
* the CPU port get out of VLAN filtering mode.
*/
if (all_user_ports_removed) {
- mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
| PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -1287,47 +1630,37 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+		/* Set the port as a user port, which is able to recognize the
+		 * VID from incoming packets before looking up the entry in
+		 * the VLAN table.
+		 */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (i.o.w. don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
struct mt7530_priv *priv = ds->priv;
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
- /* Remove this port from the port matrix of the other ports
- * in the same bridge. If the port is disabled, port matrix
- * is kept and not being setup until the port becomes enabled.
- */
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_clear(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
- }
- }
-
- /* Set the cpu port to be the only one in the port matrix of
- * this port.
- */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(MT7530_CPU_PORT)));
- priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ mt7530_update_port_member(priv, port, bridge.dev, false);
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1341,7 +1674,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1357,7 +1691,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1408,7 +1743,8 @@ err:
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1434,7 +1770,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1490,6 +1827,9 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set. Otherwise, the
@@ -1497,7 +1837,7 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
* for becoming a VLAN-aware port.
*/
mt7530_port_set_vlan_aware(ds, port);
- mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ mt7530_port_set_vlan_aware(ds, cpu_dp->index);
} else {
mt7530_port_set_vlan_unaware(ds, port);
}
@@ -1509,11 +1849,11 @@ static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
u8 new_members;
u32 val;
- new_members = entry->old_members | BIT(entry->port) |
- BIT(MT7530_CPU_PORT);
+ new_members = entry->old_members | BIT(entry->port);
/* Validate the entry with independent learning, create egress tag per
* VLAN and joining the port as one of the port members.
@@ -1524,22 +1864,20 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv,
/* Decide whether to add a tag to outgoing packets from the port
* inside the VLAN.
- */
- val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
- MT7530_VLAN_EGRESS_TAG;
- mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(entry->port),
- ETAG_CTRL_P(entry->port, val));
-
- /* CPU port is always taken as a tagged port for serving more than one
+ * The CPU port is always treated as a tagged port so it can serve
* more than one VLAN, and it uses the "stack" egress mode so that
* VLAN tags are appended after the hardware special tag used as the
* DSA tag.
*/
+ if (dsa_port_is_cpu(dp))
+ val = MT7530_VLAN_EGRESS_STACK;
+ else if (entry->untagged)
+ val = MT7530_VLAN_EGRESS_UNTAG;
+ else
+ val = MT7530_VLAN_EGRESS_TAG;
mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
- ETAG_CTRL_P(MT7530_CPU_PORT,
- MT7530_VLAN_EGRESS_STACK));
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
}
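
The three-way egress mode chosen above condenses into a sketch (hypothetical helper):

/* Sketch of the per-member egress tag decision: CPU ports always use
 * stack mode (VLAN tag appended after the DSA special tag); user
 * ports are untagged or tagged according to the VLAN entry.
 */
static u32 example_vlan_egress_mode(const struct dsa_port *dp, bool untagged)
{
	if (dsa_port_is_cpu(dp))
		return MT7530_VLAN_EGRESS_STACK;

	return untagged ? MT7530_VLAN_EGRESS_UNTAG : MT7530_VLAN_EGRESS_TAG;
}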
static void
@@ -1558,11 +1896,7 @@ mt7530_hw_vlan_del(struct mt7530_priv *priv,
return;
}
- /* If certain member apart from CPU port is still alive in the VLAN,
- * the entry would be kept valid. Otherwise, the entry is got to be
- * disabled.
- */
- if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ if (new_members) {
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
@@ -1687,21 +2021,9 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt753x_mirror_port_get(unsigned int id, u32 val)
-{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
-}
-
-static int mt753x_mirror_port_set(unsigned int id, u32 val)
-{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
-}
-
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
@@ -1714,14 +2036,14 @@ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- monitor_port = mt753x_mirror_port_get(priv->id, val);
+ monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
if (val & MT753X_MIRROR_EN(priv->id) &&
monitor_port != mirror->to_local_port)
return -EEXIST;
val |= MT753X_MIRROR_EN(priv->id);
- val &= ~MT753X_MIRROR_MASK(priv->id);
- val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
+ val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
@@ -1790,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
-static void
+static int
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -1800,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ return 0;
}
static int
@@ -1872,90 +2196,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
-static irqreturn_t
-mt7530_irq_thread_fn(int irq, void *dev_id)
-{
- struct mt7530_priv *priv = dev_id;
- bool handled = false;
- u32 val;
- int p;
-
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
- val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
- mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mutex_unlock(&priv->bus->mdio_lock);
-
- for (p = 0; p < MT7530_NUM_PHYS; p++) {
- if (BIT(p) & val) {
- unsigned int irq;
-
- irq = irq_find_mapping(priv->irq_domain, p);
- handle_nested_irq(irq);
- handled = true;
- }
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static void
-mt7530_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_bus_lock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
-}
-
-static void
-mt7530_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mutex_unlock(&priv->bus->mdio_lock);
-}
-
-static struct irq_chip mt7530_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7530_irq_mask,
- .irq_unmask = mt7530_irq_unmask,
- .irq_bus_lock = mt7530_irq_bus_lock,
- .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
-};
-
-static int
-mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7530_irq_domain_ops = {
- .map = mt7530_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
@@ -1967,47 +2207,77 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
unsigned int irq;
irq = irq_create_mapping(priv->irq_domain, p);
- ds->slave_mii_bus->irq[p] = irq;
+ ds->user_mii_bus->irq[p] = irq;
}
}
}
+static const struct regmap_irq mt7530_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */
+ REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */
+ REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */
+ REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */
+ REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */
+ REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */
+ REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */
+ REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */
+ REGMAP_IRQ_REG_LINE(17, 32), /* BMU */
+ REGMAP_IRQ_REG_LINE(18, 32), /* MIB */
+ REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */
+ REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */
+ REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */
+ REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */
+ REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */
+ REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */
+ REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */
+ REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */
+ REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */
+ REGMAP_IRQ_REG_LINE(31, 32), /* ACL */
+};
+
+static const struct regmap_irq_chip mt7530_regmap_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .status_base = MT7530_SYS_INT_STS,
+ .unmask_base = MT7530_SYS_INT_EN,
+ .ack_base = MT7530_SYS_INT_STS,
+ .init_ack_masked = true,
+ .irqs = mt7530_irqs,
+ .num_irqs = ARRAY_SIZE(mt7530_irqs),
+ .num_regs = 1,
+};
+
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
+ struct regmap_irq_chip_data *irq_data;
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- int ret;
+ int irq, ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
- priv->irq = of_irq_get(np, 0);
- if (priv->irq <= 0) {
- dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
- return priv->irq ? : -EINVAL;
- }
-
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops, priv);
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", irq);
+ return irq ? : -EINVAL;
}
/* This register must be set for MT7530 to properly fire interrupts */
- if (priv->id != ID_MT7531)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
- ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
- IRQF_ONESHOT, KBUILD_MODNAME, priv);
- if (ret) {
- irq_domain_remove(priv->irq_domain);
- dev_err(dev, "failed to request IRQ: %d\n", ret);
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ priv->regmap, irq,
+ IRQF_ONESHOT,
+ 0, &mt7530_regmap_irq_chip,
+ &irq_data);
+ if (ret)
return ret;
- }
+
+ priv->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
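
Once devm_regmap_add_irq_chip_fwnode() succeeds, the returned domain replaces the hand-rolled irq_chip that this patch deletes. A sketch of how the domain is then consumed, mirroring mt7530_setup_mdio_irq() above (the helper name is hypothetical):

/* Sketch: translate each PHY's hardware IRQ line (its bit in
 * MT7530_SYS_INT_STS, as declared via REGMAP_IRQ_REG_LINE above)
 * into a Linux virq and hand it to the MDIO bus.
 */
static void example_wire_phy_irqs(struct mt7530_priv *priv,
				  struct mii_bus *bus)
{
	int p;

	for (p = 0; p < MT7530_NUM_PHYS; p++)
		bus->irq[p] = irq_create_mapping(priv->irq_domain, p);
}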
@@ -2027,52 +2297,52 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv)
}
}
-static void
-mt7530_free_irq_common(struct mt7530_priv *priv)
-{
- free_irq(priv->irq, priv);
- irq_domain_remove(priv->irq_domain);
-}
-
-static void
-mt7530_free_irq(struct mt7530_priv *priv)
-{
- mt7530_free_mdio_irq(priv);
- mt7530_free_irq_common(priv);
-}
-
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
+ struct device_node *mnp, *np = priv->dev->of_node;
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
- int ret;
+ int ret = 0;
+
+ mnp = of_get_child_by_name(np, "mdio");
+
+ if (mnp && !of_device_is_available(mnp))
+ goto out;
bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!mnp)
+ ds->user_mii_bus = bus;
- ds->slave_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
- bus->read = mt753x_phy_read;
- bus->write = mt753x_phy_write;
+ bus->read = mt753x_phy_read_c22;
+ bus->write = mt753x_phy_write_c22;
+ bus->read_c45 = mt753x_phy_read_c45;
+ bus->write_c45 = mt753x_phy_write_c45;
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq)
+ if (priv->irq_domain && !mnp)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq)
+ if (priv->irq_domain && !mnp)
mt7530_free_mdio_irq(priv);
}
+out:
+ of_node_put(mnp);
return ret;
}
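
The mdio child-node handling above follows a common kernel pattern; a condensed sketch for reference (helper name hypothetical, error paths trimmed):

/* Sketch: an "mdio" child node that exists but is disabled skips bus
 * registration entirely; a missing node makes devm_of_mdiobus_register()
 * fall back to plain (non-OF) registration, in which case the bus also
 * doubles as the DSA user MII bus.
 */
static int example_register_switch_mdio(struct device *dev,
					struct mii_bus *bus)
{
	struct device_node *mnp;
	int ret = 0;

	mnp = of_get_child_by_name(dev->of_node, "mdio");
	if (mnp && !of_device_is_available(mnp))
		goto out;

	ret = devm_of_mdiobus_register(dev, bus, mnp);
out:
	of_node_put(mnp);	/* of_node_put(NULL) is a no-op */
	return ret;
}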
@@ -2080,19 +2350,32 @@ static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
+ struct device_node *dn = NULL;
struct device_node *phy_node;
struct device_node *mac_np;
struct mt7530_dummy_poll p;
phy_interface_t interface;
- struct device_node *dn;
+ struct dsa_port *cpu_dp;
u32 id, val;
int ret, i;
- /* The parent node of master netdev which holds the common system
+ /* The parent node of the conduit netdev, which holds the common
* system controller, is also the container for the two GMAC nodes
* that represent the two netdev instances.
*/
- dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ dn = cpu_dp->conduit->dev.of_node->parent;
+ /* It doesn't matter which CPU port is found first; their
+ * conduits should share the same parent OF node.
+ */
+ break;
+ }
+
+ if (!dn) {
+ dev_err(ds->dev, "parent OF node of DSA conduit not found");
+ return -EINVAL;
+ }
+
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2119,16 +2402,16 @@ mt7530_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
/* Wait for MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2143,23 +2426,49 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "MT7530 with a 20MHz XTAL is not supported!\n");
+ return -EINVAL;
+ }
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
- val = mt7530_read(priv, MT7530_MHWTRAP);
- val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
- val |= MHWTRAP_MANUAL;
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
+ /* Allow modifying the trap and accessing PHY registers directly via
+ * the MDIO bus the switch is on.
+ */
+ mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
+ MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
+
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2168,9 +2477,7 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2183,22 +2490,20 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
return ret;
- /* Setup port 5 */
- priv->p5_intf_sel = P5_DISABLED;
- interface = PHY_INTERFACE_MODE_NA;
-
- if (!dsa_is_unused_port(ds, 5)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
- if (ret && ret != -ENODEV)
- return ret;
- } else {
- /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ /* Check for PHY muxing on port 5 */
+ if (dsa_is_unused_port(ds, 5)) {
+ /* Scan the ethernet nodes. Look for GMAC1 and look up the PHY it uses.
+ * Set priv->p5_mode to the appropriate value if PHY muxing is
+ * detected.
+ */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
@@ -2212,22 +2517,30 @@ mt7530_setup(struct dsa_switch *ds)
if (!phy_node)
continue;
- if (phy_node->parent == priv->dev->of_node->parent) {
+ if (phy_node->parent == priv->dev->of_node->parent ||
+ phy_node->parent->parent == priv->dev->of_node) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
+ of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ priv->p5_mode = MUX_PHY_P0;
if (id == 4)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ priv->p5_mode = MUX_PHY_P4;
}
of_node_put(mac_np);
of_node_put(phy_node);
break;
}
+
+ if (priv->p5_mode == MUX_PHY_P0 ||
+ priv->p5_mode == MUX_PHY_P4) {
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ mt7530_setup_port5(ds, interface);
+ }
}
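
A condensed sketch of the port 5 PHY-mux detection above, under the DT layout the comment assumes (a GMAC1 node whose phy-handle points at PHY 0 or PHY 4 of the switch; helper name hypothetical):

/* Sketch: parse GMAC1's phy-handle and map the PHY address to the
 * corresponding port 5 mux mode.
 */
static int example_detect_p5_mux(struct dsa_switch *ds,
				 struct device_node *mac_np)
{
	struct device_node *phy_node;
	int addr;

	phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
	if (!phy_node)
		return -ENODEV;

	addr = of_mdio_parse_addr(ds->dev, phy_node);
	of_node_put(phy_node);

	if (addr == 0)
		return MUX_PHY_P0;
	if (addr == 4)
		return MUX_PHY_P4;

	return -ENODEV;
}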
#ifdef CONFIG_GPIOLIB
@@ -2238,8 +2551,6 @@ mt7530_setup(struct dsa_switch *ds)
}
#endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface);
-
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2249,6 +2560,74 @@ mt7530_setup(struct dsa_switch *ds)
}
static int
+mt7531_setup_common(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int ret, i;
+
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
+ mt753x_trap_frames(priv);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ /* Disable flooding on all ports */
+ mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ UNU_FFP_MASK);
+
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
+
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ mt753x_cpu_port_enable(ds, i);
+ } else {
+ mt7530_port_disable(ds, i);
+
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
+ /* Enable Special Tag for rx frames */
+ if (priv->id == ID_EN7581 || priv->id == ID_AN7583)
+ mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
+ CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
+}
+
+static int
mt7531_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
@@ -2261,16 +2640,16 @@ mt7531_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
/* Wait for MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2286,205 +2665,195 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
- /* Reset the switch through internal reset */
- mt7530_write(priv, MT7530_SYS_CTRL,
- SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
- SYS_CTRL_REG_RST);
+ /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
+ * MT7531BE has only one SGMII unit, which is for port 6.
+ */
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
- if (mt7531_dual_sgmii_supported(priv)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+ /* Force link down on all ports before internal reset */
+ for (i = 0; i < priv->ds->num_ports; i++)
+ mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
- /* Let ds->slave_mii_bus be able to access external phy. */
+ if (!priv->p5_sgmii) {
+ mt7531_pll_setup(priv);
+ } else {
+ /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on
+ * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO
+ * to expose the MDIO bus of the switch.
+ */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
- } else {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
- dev_dbg(ds->dev, "P5 support %s interface\n",
- p5_intf_modes(priv->p5_intf_sel));
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Let phylink decide the interface later. */
- priv->p5_interface = PHY_INTERFACE_MODE_NA;
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
+ * no phy_device has been created yet for phy_[read,write]_mmd_indirect
+ * to be called on, we provide our own mt7531_ind_mmd_phy_[read,write]
+ * helpers to complete this step.
*/
- val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ val = mt7531_ind_c45_phy_read(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
- mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
- CORE_PLL_GROUP4, val);
-
- /* BPDU to CPU port */
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(MT7530_CPU_PORT));
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
- MT753X_BPDU_CPU_ONLY);
+ mt7531_ind_c45_phy_write(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
- /* Enable and reset MIB counters */
- mt7530_mib_reset(ds);
-
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
- PCR_MATRIX_CLR);
-
- /* Disable learning by default on all ports */
- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
-
- mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
-
- if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
- } else {
- mt7530_port_disable(ds, i);
-
- /* Set default PVID to 0 on all user ports */
- mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
- G0_PORT_VID_DEF);
- }
-
- /* Enable consistent egress tag */
- mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
- PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
+ i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
+ i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
}
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
+ ret = mt7531_setup_common(ds);
if (ret)
return ret;
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
- /* Flush the FDB table */
- ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
- if (ret < 0)
- return ret;
-
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+
+ /* Port 5 supports rgmii with delays, mii, and gmii. */
+ case 5:
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
- case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+
+ /* Port 6 supports rgmii and trgmii. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
-}
-
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
-{
- return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+
+ /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
+ * MT7531AE.
+ */
+ case 5:
+ if (!priv->p5_sgmii) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+
+ /* Port 6 supports sgmii/802.3z. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 3:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
- return priv->info->phy_mode_supported(ds, port, state);
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
}
-static int
-mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
- return priv->info->pad_setup(ds, state->interface);
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
}
-static int
+static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- /* Only need to setup port5. */
- if (port != 5)
- return 0;
-
- mt7530_setup_port5(priv->ds, interface);
-
- return 0;
+ if (port == 5)
+ mt7530_setup_port5(priv->ds, interface);
+ else if (port == 6)
+ mt7530_setup_port6(priv->ds, interface);
}
-static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface,
- struct phy_device *phydev)
+static void mt7531_rgmii_setup(struct mt7530_priv *priv,
+ phy_interface_t interface,
+ struct phy_device *phydev)
{
u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) {
- dev_err(priv->dev, "RGMII mode is not available for port %d\n",
- port);
- return -EINVAL;
- }
-
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
@@ -2512,152 +2881,14 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
- return -EINVAL;
+ break;
}
}
- mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
- return 0;
-}
-
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
-{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
}
static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
-{
- struct mt7530_priv *priv = ds->priv;
- unsigned int val;
-
- /* For adjusting speed and duplex of SGMII force mode. */
- if (interface != PHY_INTERFACE_MODE_SGMII ||
- phylink_autoneg_inband(mode))
- return;
-
- /* SGMII force mode setting */
- val = mt7530_read(priv, MT7531_SGMII_MODE(port));
- val &= ~MT7531_SGMII_IF_MODE_MASK;
-
- switch (speed) {
- case SPEED_10:
- val |= MT7531_SGMII_FORCE_SPEED_10;
- break;
- case SPEED_100:
- val |= MT7531_SGMII_FORCE_SPEED_100;
- break;
- case SPEED_1000:
- val |= MT7531_SGMII_FORCE_SPEED_1000;
- break;
- }
-
- /* MT7531 SGMII 1G force mode can only work in full duplex mode,
- * no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
- */
- if ((speed == SPEED_10 || speed == SPEED_100) &&
- duplex != DUPLEX_FULL)
- val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
-
- mt7530_write(priv, MT7531_SGMII_MODE(port), val);
-}
-
-static bool mt753x_is_mac_port(u32 port)
-{
- return (port == 5 || port == 6);
-}
-
-static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface)
-{
- u32 val;
-
- if (!mt753x_is_mac_port(port))
- return -EINVAL;
-
- mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
- MT7531_SGMII_PHYA_PWD);
-
- val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
- val &= ~MT7531_RG_TPHY_SPEED_MASK;
- /* Setup 2.5 times faster clock for 2.5Gbps data speeds with 10B/8B
- * encoding.
- */
- val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
- MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
- mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
-
- mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
-
- /* MT7531 SGMII 1G and 2.5G force mode can only work in full duplex
- * mode, no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
- */
- mt7530_rmw(priv, MT7531_SGMII_MODE(port),
- MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
- MT7531_SGMII_FORCE_SPEED_1000);
-
- mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
-
- return 0;
-}
-
-static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
- phy_interface_t interface)
-{
- if (!mt753x_is_mac_port(port))
- return -EINVAL;
-
- mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
- MT7531_SGMII_PHYA_PWD);
-
- mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
- MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
-
- mt7530_set(priv, MT7531_SGMII_MODE(port),
- MT7531_SGMII_REMOTE_FAULT_DIS |
- MT7531_SGMII_SPEED_DUPLEX_AN);
-
- mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
- MT7531_SGMII_TX_CONFIG_MASK, 1);
-
- mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
-
- mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
-
- mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
-
- return 0;
-}
-
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 val;
-
- /* Only restart AN when AN is enabled */
- val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
- if (val & MT7531_SGMII_AN_ENABLE) {
- val |= MT7531_SGMII_AN_RESTART;
- mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
- }
-}
-
-static int
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
@@ -2665,165 +2896,78 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct phy_device *phydev;
struct dsa_port *dp;
- if (!mt753x_is_mac_port(port)) {
- dev_err(priv->dev, "port %d is not a MAC port\n", port);
- return -EINVAL;
+ if (phy_interface_mode_is_rgmii(interface)) {
+ dp = dsa_to_port(ds, port);
+ phydev = dp->user->phydev;
+ mt7531_rgmii_setup(priv, interface, phydev);
}
+}
+
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- dp = dsa_to_port(ds, port);
- phydev = dp->slave->phydev;
- return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_TRGMII:
+ return &priv->pcs[dp->index].pcs;
case PHY_INTERFACE_MODE_SGMII:
- return mt7531_sgmii_setup_mode_an(priv, port, interface);
- case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
- return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ return priv->ports[dp->index].sgmii_pcs;
default:
- return -EINVAL;
+ return NULL;
}
-
- return -EINVAL;
-}
-
-static int
-mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static void
-mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
-
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
- switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- if (priv->p5_intf_sel != P5_DISABLED)
- priv->p5_interface = state->interface;
- break;
- case 6: /* 1st cpu port */
- if (priv->p6_interface == state->interface)
- break;
-
- mt753x_pad_setup(ds, state);
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- priv->p6_interface = state->interface;
- break;
- default:
-unsupported:
- dev_err(ds->dev, "%s: unsupported %s port: %i\n",
- __func__, phy_modes(state->interface), port);
- return;
- }
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct mt7530_priv *priv;
+ int port = dp->index;
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
+ priv = ds->priv;
- mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
- mcr_new = mcr_cur;
- mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
- mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+ if ((port == 5 || port == 6) && priv->info->mac_port_config)
+ priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mcr_new |= PMCR_EXT_PHY;
-
- if (mcr_new != mcr_cur)
- mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
-}
-
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
+ mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
}
-static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
-}
-
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
- mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
-
- /* MT753x MAC works in 1G full duplex mode for all up-clocked
- * variants.
- */
- if (interface == PHY_INTERFACE_MODE_TRGMII ||
- (phy_interface_mode_is_8023z(interface))) {
- speed = SPEED_1000;
- duplex = DUPLEX_FULL;
- }
+ mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
@@ -2833,138 +2977,88 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL) {
mcr |= PMCR_FORCE_FDX;
if (tx_pause)
- mcr |= PMCR_TX_FC_EN;
+ mcr |= PMCR_FORCE_TX_FC_EN;
if (rx_pause)
- mcr |= PMCR_RX_FC_EN;
+ mcr |= PMCR_FORCE_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
- switch (speed) {
- case SPEED_1000:
- mcr |= PMCR_FORCE_EEE1G;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_EEE100;
- break;
- }
- }
-
- mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
-static int
-mt7531_cpu_port_config(struct dsa_switch *ds, int port)
+static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
- phy_interface_t interface;
- int speed;
- int ret;
-
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- interface = PHY_INTERFACE_MODE_RGMII;
- else
- interface = PHY_INTERFACE_MODE_2500BASEX;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
- priv->p5_interface = interface;
- break;
- case 6:
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- mt7531_pad_setup(ds, interface);
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+}
- priv->p6_interface = interface;
- break;
- default:
- return -EINVAL;
- }
+static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+ u32 val;
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
+ /* If the timer is zero, then set LPI_MODE_EN, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = LPI_MODE_EN;
+ else if (FIELD_FIT(LPI_THRESH_MASK, timer))
+ val = FIELD_PREP(LPI_THRESH_MASK, timer);
else
- speed = SPEED_1000;
+ val = LPI_THRESH_MASK;
- ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
- if (ret)
- return ret;
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
- speed, DUPLEX_FULL, true, true);
+ mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index),
+ LPI_THRESH_MASK | LPI_MODE_EN, val);
- return 0;
-}
+ mt7530_set(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7531_sgmii_validate(priv, port, supported);
+ return 0;
}
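
The threshold clamping above reduces to this sketch (hypothetical helper; assumes LPI_THRESH_MASK is a contiguous bitfield usable with the <linux/bitfield.h> accessors):

/* Sketch: a zero timer enables immediate LPI entry; a timer that fits
 * the field is programmed as-is; anything larger saturates at the
 * field maximum.
 */
static u32 example_lpi_threshold_bits(u32 timer)
{
	if (!timer)
		return LPI_MODE_EN;
	if (FIELD_FIT(LPI_THRESH_MASK, timer))
		return FIELD_PREP(LPI_THRESH_MASK, timer);
	return LPI_THRESH_MASK;
}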
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mt7530_priv *priv = ds->priv;
+ u32 eeecr;
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
- priv->info->mac_port_validate(ds, port, mask);
+ eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
+ /* tx_lpi_timer should be in microseconds. The time units for
+ * LPI threshold are unspecified.
+ */
+ config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII nor 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -2991,75 +3085,33 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
-static int
-mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
- struct phylink_link_state *state)
-{
- u32 status, val;
- u16 config_reg;
-
- status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
- state->link = !!(status & MT7531_SGMII_LINK_STATUS);
- if (state->interface == PHY_INTERFACE_MODE_SGMII &&
- (status & MT7531_SGMII_AN_ENABLE)) {
- val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
- config_reg = val >> 16;
-
- switch (config_reg & LPA_SGMII_SPD_MASK) {
- case LPA_SGMII_1000:
- state->speed = SPEED_1000;
- break;
- case LPA_SGMII_100:
- state->speed = SPEED_100;
- break;
- case LPA_SGMII_10:
- state->speed = SPEED_10;
- break;
- default:
- dev_err(priv->dev, "invalid sgmii PHY speed\n");
- state->link = false;
- return -EINVAL;
- }
-
- if (config_reg & LPA_SGMII_FULL_DUPLEX)
- state->duplex = DUPLEX_FULL;
- else
- state->duplex = DUPLEX_HALF;
- }
-
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
-
- return -EOPNOTSUPP;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_get_state(ds, port, state);
-}
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
int ret = priv->info->sw_setup(ds);
+ int i;
if (ret)
return ret;
@@ -3069,48 +3121,150 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
ret = mt7530_setup_mdio(priv);
- if (ret && priv->irq)
- mt7530_free_irq_common(priv);
+ if (ret)
+ return ret;
+
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
+ if (priv->create_sgmii)
+ ret = priv->create_sgmii(priv);
+
+ if (ret && priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
return ret;
}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *e)
{
+ if (e->tx_lpi_timer > 0xFFF)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+mt753x_conduit_state_change(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational)
+{
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
+ int val = 0;
+ u8 mask;
+
+ /* Set the CPU port that frames are trapped to on MT7530. Trapped
+ * frames will be forwarded to the numerically smallest CPU port
+ * whose conduit interface is up.
+ */
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+ mask = BIT(cpu_dp->index);
- return 0;
+ if (operational)
+ priv->active_cpu_ports |= mask;
+ else
+ priv->active_cpu_ports &= ~mask;
+
+ if (priv->active_cpu_ports) {
+ val = MT7530_CPU_EN |
+ MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
+ }
+
+ mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
}
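
A worked sketch of the trap-port selection above (hypothetical helper): with CPU ports 5 and 6 both up, active_cpu_ports is 0x60 and __ffs() picks port 5; if port 5's conduit goes down, the mask becomes 0x40 and trapping moves to port 6.

/* Sketch: compute the MFC trap field from the bitmap of CPU ports
 * whose conduit interface is operational.
 */
static u32 example_trap_port_field(u8 active_cpu_ports)
{
	if (!active_cpu_ports)
		return 0;	/* no conduit up: disable CPU trapping */

	return MT7530_CPU_EN | MT7530_CPU_PORT(__ffs(active_cpu_ports));
}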
-static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+static int mt753x_tc_setup_qdisc_tbf(struct dsa_switch *ds, int port,
+ struct tc_tbf_qopt_offload *qopt)
{
+ struct tc_tbf_qopt_offload_replace_params *p = &qopt->replace_params;
struct mt7530_priv *priv = ds->priv;
- u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
+ u32 rate = 0;
- if (e->tx_lpi_timer > 0xFFF)
- return -EINVAL;
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ rate = div_u64(p->rate.rate_bytes_ps, 1000) << 3; /* kbps */
+ fallthrough;
+ case TC_TBF_DESTROY: {
+ u32 val, tick;
- set = SET_LPI_THRESH(e->tx_lpi_timer);
- if (!e->tx_lpi_enabled)
- /* Force LPI Mode without a delay */
- set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
+ mt7530_rmw(priv, MT753X_GERLCR, EGR_BC_MASK,
+ EGR_BC_CRC_IPG_PREAMBLE);
+
+ /* If the rate is greater than 10 Mbps, the tick is 1/32 ms;
+ * otherwise it is 1 ms.
+ */
+ tick = rate > 10000 ? 2 : 7;
+ val = FIELD_PREP(ERLCR_CIR_MASK, (rate >> 5)) |
+ FIELD_PREP(ERLCR_EN_MASK, !!rate) |
+ FIELD_PREP(ERLCR_EXP_MASK, tick) |
+ ERLCR_TBF_MODE_MASK |
+ FIELD_PREP(ERLCR_MANT_MASK, 0xf);
+ mt7530_write(priv, MT753X_ERLCR_P(port), val);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
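
The TBF rate conversion above works in kbps; a worked example (hypothetical helper): 12,500,000 bytes/s becomes div_u64(12500000, 1000) << 3 = 100,000 kbps, which exceeds the 10,000 kbps cutoff, so the 1/32 ms tick (exponent 2) is selected and the CIR field becomes rate >> 5.

/* Sketch: convert the qdisc's byte rate to kbps and pick the rate
 * tick, mirroring the TC_TBF_REPLACE path above.
 */
static void example_tbf_params(u64 rate_bytes_ps, u32 *rate_kbps, u32 *tick)
{
	*rate_kbps = div_u64(rate_bytes_ps, 1000) << 3;	/* bytes/s -> kbps */
	*tick = *rate_kbps > 10000 ? 2 : 7;		/* 1/32 ms : 1 ms */
}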
+static int mt753x_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TBF:
+ return mt753x_tc_setup_qdisc_tbf(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mt7988_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Reset the switch */
+ reset_control_assert(priv->rstc);
+ usleep_range(20, 50);
+ reset_control_deassert(priv->rstc);
+ usleep_range(20, 50);
+
+ /* AN7583 requires an additional tweak to CONN_CFG */
+ if (priv->id == ID_AN7583)
+ mt7530_rmw(priv, AN7583_GEPHY_CONN_CFG,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ AN7583_CSR_ETHER_AFE_PWD,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ FIELD_PREP(AN7583_CSR_ETHER_AFE_PWD, 0));
+
+ /* Reset the switch PHYs */
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
+
+ return mt7531_setup_common(ds);
+}
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
+ .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
+ .get_eth_mac_stats = mt7530_get_eth_mac_stats,
+ .get_rmon_stats = mt7530_get_rmon_stats,
+ .get_eth_ctrl_stats = mt7530_get_eth_ctrl_stats,
+ .get_stats64 = mt7530_get_stats64,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
@@ -3131,200 +3285,133 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
- .phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
- .phylink_mac_link_down = mt753x_phylink_mac_link_down,
- .phylink_mac_link_up = mt753x_phylink_mac_link_up,
- .get_mac_eee = mt753x_get_mac_eee,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
+ .conduit_state_change = mt753x_conduit_state_change,
+ .port_setup_tc = mt753x_setup_tc,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
+ .mac_select_pcs = mt753x_phylink_mac_select_pcs,
+ .mac_config = mt753x_phylink_mac_config,
+ .mac_link_down = mt753x_phylink_mac_link_down,
+ .mac_link_up = mt753x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi,
};
-static const struct mt753x_info mt753x_table[] = {
+const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
- .pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
- .pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7531_setup,
- .phy_read = mt7531_ind_phy_read,
- .phy_write = mt7531_ind_phy_write,
- .pad_setup = mt7531_pad_setup,
- .cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
+ },
+ [ID_MT7988] = {
+ .id = ID_MT7988,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = mt7988_mac_port_get_caps,
+ },
+ [ID_EN7581] = {
+ .id = ID_EN7581,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
+ [ID_AN7583] = {
+ .id = ID_AN7583,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
},
};
+EXPORT_SYMBOL_GPL(mt753x_table);
-static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
- { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
- { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, mt7530_of_match);
-
-static int
-mt7530_probe(struct mdio_device *mdiodev)
+int
+mt7530_probe_common(struct mt7530_priv *priv)
{
- struct mt7530_priv *priv;
- struct device_node *dn;
-
- dn = mdiodev->dev.of_node;
-
- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct device *dev = priv->dev;
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
- priv->ds->dev = &mdiodev->dev;
+ priv->ds->dev = dev;
priv->ds->num_ports = MT7530_NUM_PORTS;
- /* Use medatek,mcm property to distinguish hardware type that would
- * casues a little bit differences on power-on sequence.
- */
- priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
- if (priv->mcm) {
- dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
-
- priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
- if (IS_ERR(priv->rstc)) {
- dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
- return PTR_ERR(priv->rstc);
- }
- }
-
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
- priv->info = of_device_get_match_data(&mdiodev->dev);
+ priv->info = of_device_get_match_data(dev);
if (!priv->info)
return -EINVAL;
- /* Sanity check if these required device operations are filled
- * properly.
- */
- if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
- return -EINVAL;
-
priv->id = priv->info->id;
-
- if (priv->id == ID_MT7530) {
- priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
- if (IS_ERR(priv->core_pwr))
- return PTR_ERR(priv->core_pwr);
-
- priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
- if (IS_ERR(priv->io_pwr))
- return PTR_ERR(priv->io_pwr);
- }
-
- /* Not MCM that indicates switch works as the remote standalone
- * integrated circuit so the GPIO pin would be used to complete
- * the reset, otherwise memory-mapped register accessing used
- * through syscon provides in the case of MCM.
- */
- if (!priv->mcm) {
- priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
- return PTR_ERR(priv->reset);
- }
- }
-
- priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
+ priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
+ priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
- dev_set_drvdata(&mdiodev->dev, priv);
+ dev_set_drvdata(dev, priv);
- return dsa_register_switch(priv->ds);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt7530_probe_common);
-static void
-mt7530_remove(struct mdio_device *mdiodev)
+void
+mt7530_remove_common(struct mt7530_priv *priv)
{
- struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
- int ret = 0;
-
- if (!priv)
- return;
-
- ret = regulator_disable(priv->core_pwr);
- if (ret < 0)
- dev_err(priv->dev,
- "Failed to disable core power: %d\n", ret);
-
- ret = regulator_disable(priv->io_pwr);
- if (ret < 0)
- dev_err(priv->dev, "Failed to disable io pwr: %d\n",
- ret);
-
- if (priv->irq)
- mt7530_free_irq(priv);
+ if (priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
dsa_unregister_switch(priv->ds);
- mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-static void mt7530_shutdown(struct mdio_device *mdiodev)
-{
- struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
-
- if (!priv)
- return;
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ mutex_destroy(&priv->reg_mutex);
}
-
-static struct mdio_driver mt7530_mdio_driver = {
- .probe = mt7530_probe,
- .remove = mt7530_remove,
- .shutdown = mt7530_shutdown,
- .mdiodrv.driver = {
- .name = "mt7530",
- .of_match_table = mt7530_of_match,
- },
-};
-
-mdio_module_driver(mt7530_mdio_driver);
+EXPORT_SYMBOL_GPL(mt7530_remove_common);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..3e0090bed298 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -8,7 +8,6 @@
#define MT7530_NUM_PORTS 7
#define MT7530_NUM_PHYS 5
-#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
@@ -19,6 +18,9 @@ enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
ID_MT7531 = 2,
+ ID_MT7988 = 3,
+ ID_EN7581 = 4,
+ ID_AN7583 = 5,
};
#define NUM_TRGMII_CTRL 5
@@ -32,46 +34,110 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
-/* Registers to mac forward control for unknown frames */
-#define MT7530_MFC 0x10
-#define BC_FFP(x) (((x) & 0xff) << 24)
-#define BC_FFP_MASK BC_FFP(~0)
-#define UNM_FFP(x) (((x) & 0xff) << 16)
-#define UNM_FFP_MASK UNM_FFP(~0)
-#define UNU_FFP(x) (((x) & 0xff) << 8)
-#define UNU_FFP_MASK UNU_FFP(~0)
-#define CPU_EN BIT(7)
-#define CPU_PORT(x) ((x) << 4)
-#define CPU_MASK (0xf << 4)
-#define MIRROR_EN BIT(3)
-#define MIRROR_PORT(x) ((x) & 0x7)
-#define MIRROR_MASK 0x7
-
-/* Registers for CPU forward control */
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
+/* Register for MAC forward control */
+#define MT753X_MFC 0x10
+#define BC_FFP_MASK GENMASK(31, 24)
+#define BC_FFP(x) FIELD_PREP(BC_FFP_MASK, x)
+#define UNM_FFP_MASK GENMASK(23, 16)
+#define UNM_FFP(x) FIELD_PREP(UNM_FFP_MASK, x)
+#define UNU_FFP_MASK GENMASK(15, 8)
+#define UNU_FFP(x) FIELD_PREP(UNU_FFP_MASK, x)
+#define MT7530_CPU_EN BIT(7)
+#define MT7530_CPU_PORT_MASK GENMASK(6, 4)
+#define MT7530_CPU_PORT(x) FIELD_PREP(MT7530_CPU_PORT_MASK, x)
+#define MT7530_MIRROR_EN BIT(3)
+#define MT7530_MIRROR_PORT_MASK GENMASK(2, 0)
+#define MT7530_MIRROR_PORT_GET(x) FIELD_GET(MT7530_MIRROR_PORT_MASK, x)
+#define MT7530_MIRROR_PORT_SET(x) FIELD_PREP(MT7530_MIRROR_PORT_MASK, x)
+#define MT7531_QRY_FFP_MASK GENMASK(7, 0)
+#define MT7531_QRY_FFP(x) FIELD_PREP(MT7531_QRY_FFP_MASK, x)
+
+/* Register for CPU forward control */
#define MT7531_CFC 0x4
#define MT7531_MIRROR_EN BIT(19)
-#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
-#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
-#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_MIRROR_PORT_MASK GENMASK(18, 16)
+#define MT7531_MIRROR_PORT_GET(x) FIELD_GET(MT7531_MIRROR_PORT_MASK, x)
+#define MT7531_MIRROR_PORT_SET(x) FIELD_PREP(MT7531_MIRROR_PORT_MASK, x)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
-
-#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
- MT7531_CFC : MT7530_MFC)
-#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
- MT7531_MIRROR_EN : MIRROR_EN)
-#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
- MT7531_MIRROR_MASK : MIRROR_MASK)
-
-/* Registers for BPDU and PAE frame control*/
+#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
+
+#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_CFC : MT753X_MFC)
+
+#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
+ MT7531_MIRROR_EN : MT7530_MIRROR_EN)
+
+#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_MASK : \
+ MT7530_MIRROR_PORT_MASK)
+
+#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_GET(val) : \
+ MT7530_MIRROR_PORT_GET(val))
+
+#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988 || \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
+ MT7531_MIRROR_PORT_SET(val) : \
+ MT7530_MIRROR_PORT_SET(val))
+
+/* Register for BPDU and PAE frame control */
#define MT753X_BPC 0x24
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
-
-enum mt753x_bpdu_port_fw {
- MT753X_BPDU_FOLLOW_MFC,
- MT753X_BPDU_CPU_EXCLUDE = 4,
- MT753X_BPDU_CPU_INCLUDE = 5,
- MT753X_BPDU_CPU_ONLY = 6,
- MT753X_BPDU_DROP = 7,
+#define PAE_BPDU_FR BIT(25)
+#define PAE_EG_TAG_MASK GENMASK(24, 22)
+#define PAE_EG_TAG(x) FIELD_PREP(PAE_EG_TAG_MASK, x)
+#define PAE_PORT_FW_MASK GENMASK(18, 16)
+#define PAE_PORT_FW(x) FIELD_PREP(PAE_PORT_FW_MASK, x)
+#define BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define BPDU_EG_TAG(x) FIELD_PREP(BPDU_EG_TAG_MASK, x)
+#define BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[01,02] MAC DA frame control */
+#define MT753X_RGAC1 0x28
+#define R02_BPDU_FR BIT(25)
+#define R02_EG_TAG_MASK GENMASK(24, 22)
+#define R02_EG_TAG(x) FIELD_PREP(R02_EG_TAG_MASK, x)
+#define R02_PORT_FW_MASK GENMASK(18, 16)
+#define R02_PORT_FW(x) FIELD_PREP(R02_PORT_FW_MASK, x)
+#define R01_BPDU_FR BIT(9)
+#define R01_EG_TAG_MASK GENMASK(8, 6)
+#define R01_EG_TAG(x) FIELD_PREP(R01_EG_TAG_MASK, x)
+#define R01_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[03,0E] MAC DA frame control */
+#define MT753X_RGAC2 0x2c
+#define R0E_BPDU_FR BIT(25)
+#define R0E_EG_TAG_MASK GENMASK(24, 22)
+#define R0E_EG_TAG(x) FIELD_PREP(R0E_EG_TAG_MASK, x)
+#define R0E_PORT_FW_MASK GENMASK(18, 16)
+#define R0E_PORT_FW(x) FIELD_PREP(R0E_PORT_FW_MASK, x)
+#define R03_BPDU_FR BIT(9)
+#define R03_EG_TAG_MASK GENMASK(8, 6)
+#define R03_EG_TAG(x) FIELD_PREP(R03_EG_TAG_MASK, x)
+#define R03_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_to_cpu_fw {
+ TO_CPU_FW_SYSTEM_DEFAULT,
+ TO_CPU_FW_CPU_EXCLUDE = 4,
+ TO_CPU_FW_CPU_INCLUDE = 5,
+ TO_CPU_FW_CPU_ONLY = 6,
+ TO_CPU_FW_DROP = 7,
};
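
Editorial note: the block above replaces open-coded shift/mask macros with GENMASK()/FIELD_PREP()/FIELD_GET(). As a quick illustration of why the two encodings are equivalent, here is a minimal userspace sketch; the DEMO_* definitions are simplified stand-ins for the real linux/bits.h and linux/bitfield.h helpers, which additionally do compile-time checking.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP()/FIELD_GET() */
#define DEMO_GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define DEMO_FIELD_PREP(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define DEMO_FIELD_GET(m, r)	(((uint32_t)(r) & (m)) >> __builtin_ctz(m))

#define DEMO_BC_FFP_MASK	DEMO_GENMASK(31, 24)
#define DEMO_BC_FFP(x)		DEMO_FIELD_PREP(DEMO_BC_FFP_MASK, x)

int main(void)
{
	uint32_t ports = 0x5a;

	/* The FIELD_PREP-based encoding matches the old open-coded
	 * (((x) & 0xff) << 24) form it replaces in MT753X_MFC.
	 */
	assert(DEMO_BC_FFP(ports) == ((ports & 0xff) << 24));
	assert(DEMO_FIELD_GET(DEMO_BC_FFP_MASK, DEMO_BC_FFP(ports)) == ports);

	printf("BC_FFP(0x%02x) = 0x%08x\n", ports, DEMO_BC_FFP(ports));
	return 0;
}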
/* Registers for address table access */
@@ -187,6 +253,18 @@ enum mt7530_vlan_egress_attr {
#define AGE_UNIT_MAX 0xfff
#define AGE_UNIT(x) (AGE_UNIT_MASK & (x))
+#define MT753X_ERLCR_P(x) (0x1040 + ((x) * 0x100))
+#define ERLCR_CIR_MASK GENMASK(31, 16)
+#define ERLCR_EN_MASK BIT(15)
+#define ERLCR_EXP_MASK GENMASK(11, 8)
+#define ERLCR_TBF_MODE_MASK BIT(7)
+#define ERLCR_MANT_MASK GENMASK(6, 0)
+
+#define MT753X_GERLCR 0x10e0
+#define EGR_BC_MASK GENMASK(7, 0)
+#define EGR_BC_CRC 0x4 /* crc */
+#define EGR_BC_CRC_IPG_PREAMBLE 0x18 /* crc + ipg + preamble */
+
/* Register for port STP state control */
#define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100))
#define FID_PST(fid, state) (((state) & 0x3) << ((fid) * 2))
@@ -245,6 +323,7 @@ enum mt7530_port_mode {
enum mt7530_vlan_port_eg_tag {
MT7530_VLAN_EG_DISABLED = 0,
MT7530_VLAN_EG_CONSISTENT = 1,
+ MT7530_VLAN_EG_UNTAGGED = 4,
};
enum mt7530_vlan_port_attr {
@@ -267,58 +346,59 @@ enum mt7530_vlan_port_acc_frm {
#define G0_PORT_VID_DEF G0_PORT_VID(0)
/* Register for port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
-#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define MT753X_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT_MASK GENMASK(19, 18)
+#define PMCR_IFG_XMIT(x) FIELD_PREP(PMCR_IFG_XMIT_MASK, x)
#define PMCR_EXT_PHY BIT(17)
#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
+#define MT7530_FORCE_MODE BIT(15)
+#define PMCR_MAC_TX_EN BIT(14)
+#define PMCR_MAC_RX_EN BIT(13)
#define PMCR_BACKOFF_EN BIT(9)
#define PMCR_BACKPR_EN BIT(8)
#define PMCR_FORCE_EEE1G BIT(7)
#define PMCR_FORCE_EEE100 BIT(6)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_RX_FC_EN BIT(5)
+#define PMCR_FORCE_TX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
- PMCR_FORCE_SPEED_1000)
-#define MT7531_FORCE_LNK BIT(31)
-#define MT7531_FORCE_SPD BIT(30)
-#define MT7531_FORCE_DPX BIT(29)
-#define MT7531_FORCE_RX_FC BIT(28)
-#define MT7531_FORCE_TX_FC BIT(27)
-#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
- MT7531_FORCE_SPD | \
- MT7531_FORCE_DPX | \
- MT7531_FORCE_RX_FC | \
- MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
- MT7531_FORCE_MODE : \
- PMCR_FORCE_MODE)
-#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
- PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
- PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
- PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+#define MT7531_FORCE_MODE_LNK BIT(31)
+#define MT7531_FORCE_MODE_SPD BIT(30)
+#define MT7531_FORCE_MODE_DPX BIT(29)
+#define MT7531_FORCE_MODE_RX_FC BIT(28)
+#define MT7531_FORCE_MODE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE_EEE100 BIT(26)
+#define MT7531_FORCE_MODE_EEE1G BIT(25)
+#define MT7531_FORCE_MODE_MASK (MT7531_FORCE_MODE_LNK | \
+ MT7531_FORCE_MODE_SPD | \
+ MT7531_FORCE_MODE_DPX | \
+ MT7531_FORCE_MODE_RX_FC | \
+ MT7531_FORCE_MODE_TX_FC | \
+ MT7531_FORCE_MODE_EEE100 | \
+ MT7531_FORCE_MODE_EEE1G)
+#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_FORCE_MODE_MASK : \
+ MT7530_FORCE_MODE)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
+ PMCR_FORCE_EEE1G | \
+ PMCR_FORCE_EEE100 | \
+ PMCR_FORCE_RX_FC_EN | \
+ PMCR_FORCE_TX_FC_EN | \
PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK)
-#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
-#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
-#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define MT753X_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000_MASK GENMASK(31, 24)
+#define WAKEUP_TIME_1000(x) FIELD_PREP(WAKEUP_TIME_1000_MASK, x)
+#define WAKEUP_TIME_100_MASK GENMASK(23, 16)
+#define WAKEUP_TIME_100(x) FIELD_PREP(WAKEUP_TIME_100_MASK, x)
#define LPI_THRESH_MASK GENMASK(15, 4)
-#define LPI_THRESH_SHT 4
-#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
-#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_THRESH_GET(x) FIELD_GET(LPI_THRESH_MASK, x)
+#define LPI_THRESH_SET(x) FIELD_PREP(LPI_THRESH_MASK, x)
#define LPI_MODE_EN BIT(0)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
@@ -348,6 +428,48 @@ enum mt7530_vlan_port_acc_frm {
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+/* Each define is an offset of MT7530_PORT_MIB_COUNTER */
+#define MT7530_PORT_MIB_TX_DROP 0x00
+#define MT7530_PORT_MIB_TX_CRC_ERR 0x04
+#define MT7530_PORT_MIB_TX_UNICAST 0x08
+#define MT7530_PORT_MIB_TX_MULTICAST 0x0c
+#define MT7530_PORT_MIB_TX_BROADCAST 0x10
+#define MT7530_PORT_MIB_TX_COLLISION 0x14
+#define MT7530_PORT_MIB_TX_SINGLE_COLLISION 0x18
+#define MT7530_PORT_MIB_TX_MULTIPLE_COLLISION 0x1c
+#define MT7530_PORT_MIB_TX_DEFERRED 0x20
+#define MT7530_PORT_MIB_TX_LATE_COLLISION 0x24
+#define MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION 0x28
+#define MT7530_PORT_MIB_TX_PAUSE 0x2c
+#define MT7530_PORT_MIB_TX_PKT_SZ_64 0x30
+#define MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127 0x34
+#define MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255 0x38
+#define MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511 0x3c
+#define MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023 0x40
+#define MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX 0x44
+#define MT7530_PORT_MIB_TX_BYTES 0x48 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_DROP 0x60
+#define MT7530_PORT_MIB_RX_FILTERING 0x64
+#define MT7530_PORT_MIB_RX_UNICAST 0x68
+#define MT7530_PORT_MIB_RX_MULTICAST 0x6c
+#define MT7530_PORT_MIB_RX_BROADCAST 0x70
+#define MT7530_PORT_MIB_RX_ALIGN_ERR 0x74
+#define MT7530_PORT_MIB_RX_CRC_ERR 0x78
+#define MT7530_PORT_MIB_RX_UNDER_SIZE_ERR 0x7c
+#define MT7530_PORT_MIB_RX_FRAG_ERR 0x80
+#define MT7530_PORT_MIB_RX_OVER_SZ_ERR 0x84
+#define MT7530_PORT_MIB_RX_JABBER_ERR 0x88
+#define MT7530_PORT_MIB_RX_PAUSE 0x8c
+#define MT7530_PORT_MIB_RX_PKT_SZ_64 0x90
+#define MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127 0x94
+#define MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255 0x98
+#define MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511 0x9c
+#define MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023 0xa0
+#define MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX 0xa4
+#define MT7530_PORT_MIB_RX_BYTES 0xa8 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_CTRL_DROP 0xb0
+#define MT7530_PORT_MIB_RX_INGRESS_DROP 0xb4
+#define MT7530_PORT_MIB_RX_ARL_DROP 0xb8
#define MT7530_MIB_CCR 0x4fe0
#define CCR_MIB_ENABLE BIT(31)
#define CCR_RX_OCT_CNT_GOOD BIT(7)
@@ -365,46 +487,8 @@ enum mt7530_vlan_port_acc_frm {
CCR_TX_OCT_CNT_BAD)
/* MT7531 SGMII register group */
-#define MT7531_SGMII_REG_BASE 0x5000
-#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
- ((p) - 5) * 0x1000 + (r))
-
-/* Register forSGMII PCS_CONTROL_1 */
-#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
-#define MT7531_SGMII_LINK_STATUS BIT(18)
-#define MT7531_SGMII_AN_ENABLE BIT(12)
-#define MT7531_SGMII_AN_RESTART BIT(9)
-
-/* Register for SGMII PCS_SPPED_ABILITY */
-#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
-#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
-#define MT7531_SGMII_TX_CONFIG BIT(0)
-
-/* Register for SGMII_MODE */
-#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
-#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
-#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
-#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
-#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
-#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
-#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
-#define MT7531_SGMII_FORCE_SPEED_10 0
-#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
-
-enum mt7531_sgmii_force_duplex {
- MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
- MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
-};
-
-/* Fields of QPHY_PWR_STATE_CTRL */
-#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
-#define MT7531_SGMII_PHYA_PWD BIT(4)
-
-/* Values of SGMII SPEED */
-#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
-#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
-#define MT7531_RG_TPHY_SPEED_1_25G 0x0
-#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+#define MT7531_SGMII_REG_BASE(p) (0x5000 + ((p) - 5) * 0x1000)
+#define MT7531_PHYA_CTRL_SIGNAL3 0x128
/* Register for system reset */
#define MT7530_SYS_CTRL 0x7000
@@ -481,32 +565,30 @@ enum mt7531_clk_skew {
MT7531_CLK_SKEW_REVERSE = 3,
};
-/* Register for hw trap status */
-#define MT7530_HWTRAP 0x7800
-#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_40MHZ (BIT(10))
-#define HWTRAP_XTAL_20MHZ (BIT(9))
-
-#define MT7531_HWTRAP 0x7800
-#define HWTRAP_XTAL_FSEL_MASK BIT(7)
-#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
-#define HWTRAP_XTAL_FSEL_40MHZ 0
-/* Unique fields of (M)HWSTRAP for MT7531 */
-#define XTAL_FSEL_S 7
-#define XTAL_FSEL_M BIT(7)
-#define PHY_EN BIT(6)
-#define CHG_STRAP BIT(8)
-
-/* Register for hw trap modification */
-#define MT7530_MHWTRAP 0x7804
-#define MHWTRAP_PHY0_SEL BIT(20)
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
+/* Register for trap status */
+#define MT753X_TRAP 0x7800
+#define MT7530_XTAL_MASK (BIT(10) | BIT(9))
+#define MT7530_XTAL_25MHZ (BIT(10) | BIT(9))
+#define MT7530_XTAL_40MHZ BIT(10)
+#define MT7530_XTAL_20MHZ BIT(9)
+#define MT7531_XTAL25 BIT(7)
+
+/* Register for trap modification */
+#define MT753X_MTRAP 0x7804
+#define MT7530_P5_PHY0_SEL BIT(20)
+#define MT7530_CHG_TRAP BIT(16)
+#define MT7530_P5_MAC_SEL BIT(13)
+#define MT7530_P6_DIS BIT(8)
+#define MT7530_P5_RGMII_MODE BIT(7)
+#define MT7530_P5_DIS BIT(6)
+#define MT7530_PHY_INDIRECT_ACCESS BIT(5)
+#define MT7531_CHG_STRAP BIT(8)
+#define MT7531_PHY_EN BIT(6)
+
+enum mt7531_xtal_fsel {
+ MT7531_XTAL_FSEL_25MHZ,
+ MT7531_XTAL_FSEL_40MHZ,
+};
/* Register for TOP signal control */
#define MT7530_TOP_SIG_CTRL 0x7808
@@ -592,6 +674,15 @@ enum mt7531_clk_skew {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+#define MT753X_CPORT_SPTAG_CFG 0x7c10
+#define CPORT_SW2FE_STAG_EN BIT(1)
+#define CPORT_FE2SW_STAG_EN BIT(0)
+
+#define AN7583_GEPHY_CONN_CFG 0x7c14
+#define AN7583_CSR_DPHY_CKIN_SEL BIT(31)
+#define AN7583_CSR_PHY_CORE_REG_CLK_SEL BIT(30)
+#define AN7583_CSR_ETHER_AFE_PWD GENMASK(28, 24)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
@@ -636,10 +727,11 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
-#define MT753X_CTRL_PHY_ADDR 0
+#define MT753X_CTRL_PHY_ADDR(addr) ((addr + 1) & 0x1f)
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -703,85 +795,62 @@ struct mt7530_fdb {
* @pm: The matrix used to show all connections with the port.
* @pvid: The VLAN specified is to be considered a PVID at ingress. Any
* untagged frames will be assigned to the related VLAN.
- * @vlan_filtering: The flags indicating whether the port that can recognize
- * VLAN-tagged frames.
+ * @sgmii_pcs: Pointer to PCS instance for SerDes ports
*/
struct mt7530_port {
bool enable;
+ bool isolated;
u32 pm;
u16 pvid;
+ struct phylink_pcs *sgmii_pcs;
};
-/* Port 5 interface select definitions */
-enum p5_interface_select {
- P5_DISABLED = 0,
- P5_INTF_SEL_PHY_P0,
- P5_INTF_SEL_PHY_P4,
- P5_INTF_SEL_GMAC5,
- P5_INTF_SEL_GMAC5_SGMII,
+/* Port 5 mode definitions of the MT7530 switch */
+enum mt7530_p5_mode {
+ GMAC5,
+ MUX_PHY_P0,
+ MUX_PHY_P4,
};
-static const char *p5_intf_modes(unsigned int p5_interface)
-{
- switch (p5_interface) {
- case P5_DISABLED:
- return "DISABLED";
- case P5_INTF_SEL_PHY_P0:
- return "PHY P0";
- case P5_INTF_SEL_PHY_P4:
- return "PHY P4";
- case P5_INTF_SEL_GMAC5:
- return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
- default:
- return "unknown";
- }
-}
-
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
+ * @id:			Holding the identifier of a switch model
+ * @pcs_ops: Holding the pointer to the MAC PCS operations structure
* @sw_setup: Holding the handler to a device initialization
- * @phy_read: Holding the way reading PHY port
- * @phy_write: Holding the way writing PHY port
- * @pad_setup: Holding the way setting up the bus pad for a certain
- * MAC port
- * @phy_mode_supported: Check if the PHY type is being supported on a certain
- * port
- * @mac_port_validate: Holding the way to set addition validate type for a
- * certan MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
+ * @phy_read_c22:	Holding the handler for reading a PHY port using C22
+ * @phy_write_c22:	Holding the handler for writing a PHY port using C22
+ * @phy_read_c45:	Holding the handler for reading a PHY port using C45
+ * @phy_write_c45:	Holding the handler for writing a PHY port using C45
+ * @mac_port_get_caps: Holding the handler that provides MAC capabilities
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
- int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
- int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
- int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
- int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
- void (*mac_port_validate)(struct dsa_switch *ds, int port,
- unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
- int (*mac_port_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
+ int (*phy_read_c22)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write_c22)(struct mt7530_priv *priv, int port, int regnum,
+ u16 val);
+ int (*phy_read_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum);
+ int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ void (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
};
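
Editorial note: mt753x_table above is the classic per-chip ops table; probe selects one entry via of_device_get_match_data() and everything else dispatches through priv->info. A hedged, self-contained sketch of the idiom follows; all names are invented for illustration and are not the driver's API.

#include <stdio.h>

enum demo_id { DEMO_CHIP_A, DEMO_CHIP_B };

/* One ops bundle per supported chip, mirroring struct mt753x_info */
struct demo_info {
	enum demo_id id;
	int (*sw_setup)(void);
};

static int chip_a_setup(void) { puts("chip A setup"); return 0; }
static int chip_b_setup(void) { puts("chip B setup"); return 0; }

static const struct demo_info demo_table[] = {
	[DEMO_CHIP_A] = { .id = DEMO_CHIP_A, .sw_setup = chip_a_setup },
	[DEMO_CHIP_B] = { .id = DEMO_CHIP_B, .sw_setup = chip_b_setup },
};

int main(void)
{
	/* Probe time: pick the entry for the detected chip (the real
	 * driver gets it from the OF match data) and sanity-check the
	 * mandatory handler before use.
	 */
	const struct demo_info *info = &demo_table[DEMO_CHIP_B];

	if (!info->sw_setup)
		return 1;
	return info->sw_setup();
}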
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -789,6 +858,7 @@ struct mt753x_info {
* @dev: The device pointer
* @ds: The pointer to the dsa core structure
* @bus: The bus used for the device and built-in PHY
+ * @regmap: The regmap instance representing all switch registers
* @rstc: The pointer to reset control used by MCM
* @core_pwr: The power supplied into the core
* @io_pwr: The power supplied into the I/O
@@ -798,17 +868,19 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p6_interface Holding the current port 6 interface
- * @p5_intf_sel: Holding the current port 5 interface select
- *
- * @irq: IRQ number of the switch
+ * @p5_mode: Holding the current mode of port 5 of the MT7530 switch
+ * @p5_sgmii:		Flag indicating whether port 5 of the MT7531 switch
+ *			has SGMII
* @irq_domain: IRQ domain of the switch irq_chip
- * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
+ * @create_sgmii: Pointer to function creating SGMII PCS instance(s)
+ * @active_cpu_ports: Holding the active CPU ports
+ * @mdiodev: The pointer to the MDIO device structure
*/
struct mt7530_priv {
struct device *dev;
struct dsa_switch *ds;
struct mii_bus *bus;
+ struct regmap *regmap;
struct reset_control *rstc;
struct regulator *core_pwr;
struct regulator *io_pwr;
@@ -816,18 +888,18 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- phy_interface_t p6_interface;
- phy_interface_t p5_interface;
- unsigned int p5_intf_sel;
+ enum mt7530_p5_mode p5_mode;
+ bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
-
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
	/* protect register accesses among processes */
struct mutex reg_mutex;
- int irq;
struct irq_domain *irq_domain;
- u32 irq_enable;
+ int (*create_sgmii)(struct mt7530_priv *priv);
+ u8 active_cpu_ports;
+ struct mdio_device *mdiodev;
};
struct mt7530_hw_vlan_entry {
@@ -864,4 +936,9 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
p->reg = reg;
}
+int mt7530_probe_common(struct mt7530_priv *priv);
+void mt7530_remove_common(struct mt7530_priv *priv);
+
+extern const struct mt753x_info mt753x_table[];
+
#endif /* __MT7530_H */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..9c8ac14cd4f5 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
@@ -244,11 +247,58 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(priv, addr, regnum, val);
}
+static void mv88e6060_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+ struct mv88e6060_priv *priv = ds->priv;
+ int addr = REG_PORT(port);
+ int ret;
+
+ ret = reg_read(priv, addr, PORT_STATUS);
+ if (ret < 0) {
+ dev_err(ds->dev,
+ "port %d: unable to read status register: %pe\n",
+ port, ERR_PTR(ret));
+ return;
+ }
+
+ /* If the port is configured in SNI mode (acts as a 10Mbps PHY),
+ * it should have phy-mode = "sni", but that doesn't yet exist, so
+ * forcibly fail validation until the need arises to introduce it.
+ */
+ if (!(ret & PORT_STATUS_PORTMODE)) {
+ dev_warn(ds->dev, "port %d: SNI mode not supported\n", port);
+ return;
+ }
+
+ config->mac_capabilities = MAC_100 | MAC_10 | MAC_SYM_PAUSE;
+
+ if (port >= 4) {
+ /* Ports 4 and 5 can support MII, REVMII and REVRMII modes */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, interfaces);
+ }
+ if (port <= 4) {
+ /* Ports 0 to 3 have internal PHYs, and port 4 can optionally
+ * use an internal PHY.
+ */
+ /* Internal PHY */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* Default phylib interface mode */
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ }
+}
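
Editorial note: every phylink_get_caps implementation in this patch has the same shape: populate config->supported_interfaces (a bitmap of phy_interface_t values) and config->mac_capabilities (speed/pause flags). Below is a minimal userspace sketch of that shape, modelled loosely on mv88e6060_phylink_get_caps above; all constants are invented stand-ins for the kernel's PHY_INTERFACE_MODE_* and MAC_* definitions.

#include <stdint.h>
#include <stdio.h>

enum { IF_MII, IF_REVMII, IF_REVRMII, IF_INTERNAL, IF_GMII };
#define DEMO_MAC_10		(1u << 0)
#define DEMO_MAC_100		(1u << 1)
#define DEMO_MAC_SYM_PAUSE	(1u << 2)

struct demo_config {
	uint64_t supported_interfaces;	/* stand-in for the interface bitmap */
	uint32_t mac_capabilities;
};

static void demo_get_caps(int port, struct demo_config *cfg)
{
	cfg->mac_capabilities = DEMO_MAC_100 | DEMO_MAC_10 | DEMO_MAC_SYM_PAUSE;

	if (port >= 4) {
		/* MII and reversed MII/RMII on the uplink-capable ports */
		cfg->supported_interfaces |= 1ull << IF_MII;
		cfg->supported_interfaces |= 1ull << IF_REVMII;
		cfg->supported_interfaces |= 1ull << IF_REVRMII;
	}
	if (port <= 4) {
		/* internal PHY, plus GMII as the phylib default */
		cfg->supported_interfaces |= 1ull << IF_INTERNAL;
		cfg->supported_interfaces |= 1ull << IF_GMII;
	}
}

int main(void)
{
	struct demo_config cfg = { 0 };

	demo_get_caps(4, &cfg);
	printf("interfaces=0x%llx caps=0x%x\n",
	       (unsigned long long)cfg.supported_interfaces,
	       cfg.mac_capabilities);
	return 0;
}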
+
static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
.setup = mv88e6060_setup,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
+ .phylink_get_caps = mv88e6060_phylink_get_caps,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
static int mv88e6060_probe(struct mdio_device *mdiodev)
@@ -294,8 +344,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 7a2445a34eb7..64ae3882d17c 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,7 +2,6 @@
config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
- depends on PTP_1588_CLOCK_OPTIONAL
select IRQ_DOMAIN
select NET_DSA_TAG_EDSA
select NET_DSA_TAG_DSA
@@ -13,7 +12,18 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
+ depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+ (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
+
+config NET_DSA_MV88E6XXX_LEDS
+ bool "LED support for Marvell 88E6xxx"
+ default y
+ depends on NET_DSA_MV88E6XXX
+ depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_MV88E6XXX
+ depends on LEDS_TRIGGERS
+ help
+	  This enables support for controlling the LEDs attached to the
+ Marvell 88E6xxx switch chips.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index c8eca2b6f959..dd961081d631 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -9,9 +9,18 @@ mv88e6xxx-objs += global2.o
mv88e6xxx-objs += global2_avb.o
mv88e6xxx-objs += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_LEDS) += leds.o
+mv88e6xxx-objs += pcs-6185.o
+mv88e6xxx-objs += pcs-6352.o
+mv88e6xxx-objs += pcs-639x.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
+mv88e6xxx-objs += switchdev.o
+mv88e6xxx-objs += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 14c678a9e41b..b4d48997bf46 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -23,10 +23,11 @@
#include <linux/list.h>
#include <linux/mdio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mv88e6xxx.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phylink.h>
@@ -86,12 +87,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- /* There's no bus specific operation to wait for a mask */
- for (i = 0; i < 16; i++) {
+ /* There's no bus specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
@@ -99,9 +104,19 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
if ((data & mask) == val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
+ err = mv88e6xxx_read(chip, addr, reg, &data);
+ if (err)
+ return err;
+
+ if ((data & mask) == val)
+ return 0;
+
dev_err(chip->dev, "Timeout while waiting for switch\n");
return -ETIMEDOUT;
}
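
Editorial note: the reworked wait loop above swaps a fixed 16-iteration poll for a jiffies deadline, guarantees at least one retry even if the first read is slow, and re-reads once after the loop so a success that lands right at the deadline is not misreported as a timeout. A userspace sketch of the same pattern follows, assuming a hypothetical reg_read() accessor in place of the real bus operation.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint16_t fake_reg = 0x8000;	/* pretend hardware register */

/* Stand-in for the real bus accessor */
static int reg_read(uint16_t *val)
{
	*val = fake_reg;
	return 0;
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static int wait_mask(uint16_t mask, uint16_t want, unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;
	uint16_t val;
	int i;

	/* Always make at least two attempts, even if the first read alone
	 * blows past the deadline; a real driver would also sleep between
	 * attempts, as the patched loop does with usleep_range().
	 */
	for (i = 0; now_ms() < deadline || i < 2; i++) {
		if (reg_read(&val))
			return -EIO;
		if ((val & mask) == want)
			return 0;
	}

	/* One final read: the condition may have become true right as the
	 * deadline expired.
	 */
	if (reg_read(&val))
		return -EIO;

	return (val & mask) == want ? 0 : -ETIMEDOUT;
}

int main(void)
{
	printf("wait_mask -> %d\n", wait_mask(0x8000, 0x8000, 50));
	return 0;
}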
@@ -117,8 +132,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
- mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
- list);
+ mdio_bus = list_first_entry_or_null(&chip->mdios,
+ struct mv88e6xxx_mdio_bus, list);
if (!mdio_bus)
return NULL;
@@ -282,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
- chip->g1_irq.domain = irq_domain_add_simple(
+ chip->g1_irq.domain = irq_domain_create_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
@@ -379,7 +394,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
kthread_init_delayed_work(&chip->irq_poll_work,
mv88e6xxx_irq_poll);
- chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev));
+ chip->kworker = kthread_run_worker(0, "%s", dev_name(chip->dev));
if (IS_ERR(chip->kworker))
return PTR_ERR(chip->kworker);
@@ -442,9 +457,6 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
- if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(port);
-
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -459,11 +471,11 @@ restore_link:
return err;
}
-static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
+static int mv88e6xxx_phy_is_internal(struct mv88e6xxx_chip *chip, int port)
{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- return port < chip->info->num_internal_phys;
+ return port >= chip->info->internal_phys_offset &&
+ port < chip->info->num_internal_phys +
+ chip->info->internal_phys_offset;
}
static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
@@ -471,6 +483,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
u16 reg;
int err;
+ /* The 88e6250 family does not have the PHY detect bit. Instead,
+ * report whether the port is internal.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6250)
+ return mv88e6xxx_phy_is_internal(chip, port);
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev,
@@ -482,280 +500,484 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
}
-static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int lane;
- int err;
+ u8 cmode = chip->ports[port].cmode;
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0 && chip->info->ops->serdes_pcs_get_state)
- err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
- state);
- else
- err = -EOPNOTSUPP;
- mv88e6xxx_reg_unlock(chip);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
- return err;
+ if (mv88e6xxx_phy_is_internal(chip, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
+ }
}
-static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- const struct mv88e6xxx_ops *ops = chip->info->ops;
- int lane;
+ u8 cmode = chip->ports[port].cmode;
- if (ops->serdes_pcs_config) {
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- return ops->serdes_pcs_config(chip, port, lane, mode,
- interface, advertise);
- }
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
- return 0;
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
}
-static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_REVMII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_REVRMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* higher interface modes are not needed here, since ports supporting
+ * them are writable, and so the supported interfaces are filled in the
+	 * corresponding .phylink_get_caps() implementation below
+ */
+};
+
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- const struct mv88e6xxx_ops *ops;
- int err = 0;
- int lane;
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
+}
- ops = chip->info->ops;
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- if (ops->serdes_pcs_an_restart) {
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- err = ops->serdes_pcs_an_restart(chip, port, lane);
- mv88e6xxx_reg_unlock(chip);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
- if (err)
- dev_err(ds->dev, "p%d: failed to restart AN\n", port);
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
}
}
-static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- unsigned int mode,
- int speed, int duplex)
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- const struct mv88e6xxx_ops *ops = chip->info->ops;
- int lane;
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
- if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- return ops->serdes_pcs_link_up(chip, port, lane,
- speed, duplex);
- }
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
- return 0;
+static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
}
-static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port)
{
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ u16 reg, val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
+
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
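
Editorial note: mv88e63xx_get_port_serdes_cmode() above uses a small read-modify-restore trick: PHY_DETECT doubles as a mode bit, so it is cleared temporarily to make the status register report the serdes C_Mode, then the original value is written back. A userspace sketch of that flow follows; the register layout and bit positions are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PHY_DETECT	0x1000	/* invented bit position */
#define DEMO_CMODE_MASK	0x000f

static uint16_t port_sts = DEMO_PHY_DETECT | 0x0009;	/* pretend hardware */

static int reg_read(uint16_t *val) { *val = port_sts; return 0; }
static int reg_write(uint16_t val) { port_sts = val;  return 0; }

static int get_serdes_cmode(void)
{
	uint16_t reg, val;

	if (reg_read(&reg))
		return -1;

	/* PHY_DETECT clear means the port is not in auto-media mode, so
	 * report the "no serdes" sentinel, as the driver does with 0xf.
	 */
	if (!(reg & DEMO_PHY_DETECT))
		return 0xf;

	if (reg_write(reg & ~DEMO_PHY_DETECT))	/* expose serdes cmode */
		return -1;
	if (reg_read(&val))
		return -1;
	if (reg_write(reg))			/* restore original value */
		return -1;

	return val & DEMO_CMODE_MASK;
}

int main(void)
{
	printf("serdes cmode = 0x%x\n", get_serdes_cmode());
	return 0;
}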
-static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- /* FIXME: if the port is in 1000Base-X mode, then it only supports
- * 1000M FD speeds. In this case, CMODE will indicate 5.
- */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ return;
+
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
}
-static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 5)
- phylink_set(mask, 2500baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int cmode;
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+	/* Ports 0 and 1 are serdes-only ports */
+ if (port == 0 || port == 1) {
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
}
-static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- }
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- }
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
- mv88e6390_phylink_validate(chip, port, mask, state);
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that 6x90 doesn't support RXAUI nor XAUI).
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
}
-static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
+ bool is_6191x =
+ chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
+ bool is_6361 =
+ chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361;
+
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
if (port == 0 || port == 9 || port == 10) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ config->mac_capabilities |= MAC_2500FD;
+
+ /* 6361 only supports up to 2500BaseX */
+ if (!is_6361) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII, supported);
+ config->mac_capabilities |= MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
}
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- mv88e6065_phylink_validate(chip, port, mask, state);
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
-static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mv88e6xxx_chip *chip = ds->priv;
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set_port_modes(mask);
+ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->phylink_get_caps(chip, port, config);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (mv88e6xxx_phy_is_internal(chip, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Internal ports with no phy-mode need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+}
+
+static struct phylink_pcs *
+mv88e6xxx_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ struct phylink_pcs *pcs = NULL;
- if (chip->info->ops->phylink_validate)
- chip->info->ops->phylink_validate(chip, port, mask, state);
+ if (chip->info->ops->pcs_ops)
+ pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index,
+ interface);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return pcs;
+}
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
+static int mv88e6xxx_mac_prepare(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
+ int err = 0;
+
+ /* In inband mode, the link may come up at any time while the link
+ * is not forced down. Force the link down while we reconfigure the
+ * interface mode.
*/
- phylink_helper_basex_speed(state);
+ if (mode == MLO_AN_INBAND &&
+ chip->ports[port].interface != interface &&
+ chip->info->ops->port_set_link) {
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->port_set_link(chip, port,
+ LINK_FORCED_DOWN);
+ mv88e6xxx_reg_unlock(chip);
+ }
+
+ return err;
}
-static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_port *p;
- int err;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
+ int err = 0;
- p = &chip->ports[port];
+ mv88e6xxx_reg_lock(chip);
- /* FIXME: is this the correct test? If we're in fixed mode on an
- * internal port, why should we process this any different from
- * PHY mode? On the other hand, the port may be automedia between
- * an internal PHY and the serdes...
- */
- if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
- return;
+ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(chip, port)) {
+ err = mv88e6xxx_port_config_interface(chip, port,
+ state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+ }
- mv88e6xxx_reg_lock(chip);
- /* In inband mode, the link may come up at any time while the link
- * is not forced down. Force the link down while we reconfigure the
- * interface mode.
- */
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
- chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
+err_unlock:
+ mv88e6xxx_reg_unlock(chip);
- err = mv88e6xxx_port_config_interface(chip, port, state->interface);
if (err && err != -EOPNOTSUPP)
- goto err_unlock;
+ dev_err(chip->dev, "p%d: failed to configure MAC/PCS\n", port);
+}
- err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
- state->advertising);
- /* FIXME: we should restart negotiation if something changed - which
- * is something we get if we convert to using phylinks PCS operations.
- */
- if (err > 0)
- err = 0;
+static int mv88e6xxx_mac_finish(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
+ int err = 0;
/* Undo the forced down state above after completing configuration
- * irrespective of its state on entry, which allows the link to come up.
+ * irrespective of its state on entry, which allows the link to come
+ * up in the in-band case where there is no separate SERDES. Also
+ * ensure that the link can come up if the PPU is in use and we are
+	 * in PHY mode (we treat the PPU as an effective in-band mechanism).
*/
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
- chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
+ mv88e6xxx_reg_lock(chip);
- p->interface = state->interface;
+ if (chip->info->ops->port_set_link &&
+ ((mode == MLO_AN_INBAND &&
+ chip->ports[port].interface != interface) ||
+ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
+ err = chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
-err_unlock:
mv88e6xxx_reg_unlock(chip);
- if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
+ chip->ports[port].interface = interface;
+
+ return err;
}
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+ /* Force the link down if we know the port may not be automatically
+ * updated by the switch or if we are using fixed-link mode.
*/
- if (((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
+
+ if (!err && ops->port_set_speed_duplex)
+ err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+ DUPLEX_UNFORCED);
mv88e6xxx_reg_unlock(chip);
if (err)
@@ -763,36 +985,27 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
"p%d: failed to force MAC link down\n", port);
}
-static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void mv88e6xxx_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+	/* Configure and force the link up if we know that the port may not
+	 * be automatically updated by the switch or if we are using
+	 * fixed-link mode.
*/
- if ((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if (!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) {
- /* FIXME: for an automedia port, should we force the link
- * down here - what if the link comes up due to "other" media
- * while we're bringing the port up, how is the exclusivity
- * handled in the Marvell hardware? E.g. port 2 on 88E6390
- * shared between internal PHY and Serdes.
- */
- err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed,
- duplex);
- if (err)
- goto error;
-
if (ops->port_set_speed_duplex) {
err = ops->port_set_speed_duplex(chip, port,
speed, duplex);
@@ -807,82 +1020,100 @@ error:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev,
+ dev_err(chip->dev,
"p%d: failed to configure MAC link up\n", port);
}
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
+ int err;
+
if (!chip->info->ops->stats_snapshot)
return -EOPNOTSUPP;
- return chip->info->ops->stats_snapshot(chip, port);
-}
-
-static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
- { "in_good_octets", 8, 0x00, STATS_TYPE_BANK0, },
- { "in_bad_octets", 4, 0x02, STATS_TYPE_BANK0, },
- { "in_unicast", 4, 0x04, STATS_TYPE_BANK0, },
- { "in_broadcasts", 4, 0x06, STATS_TYPE_BANK0, },
- { "in_multicasts", 4, 0x07, STATS_TYPE_BANK0, },
- { "in_pause", 4, 0x16, STATS_TYPE_BANK0, },
- { "in_undersize", 4, 0x18, STATS_TYPE_BANK0, },
- { "in_fragments", 4, 0x19, STATS_TYPE_BANK0, },
- { "in_oversize", 4, 0x1a, STATS_TYPE_BANK0, },
- { "in_jabber", 4, 0x1b, STATS_TYPE_BANK0, },
- { "in_rx_error", 4, 0x1c, STATS_TYPE_BANK0, },
- { "in_fcs_error", 4, 0x1d, STATS_TYPE_BANK0, },
- { "out_octets", 8, 0x0e, STATS_TYPE_BANK0, },
- { "out_unicast", 4, 0x10, STATS_TYPE_BANK0, },
- { "out_broadcasts", 4, 0x13, STATS_TYPE_BANK0, },
- { "out_multicasts", 4, 0x12, STATS_TYPE_BANK0, },
- { "out_pause", 4, 0x15, STATS_TYPE_BANK0, },
- { "excessive", 4, 0x11, STATS_TYPE_BANK0, },
- { "collisions", 4, 0x1e, STATS_TYPE_BANK0, },
- { "deferred", 4, 0x05, STATS_TYPE_BANK0, },
- { "single", 4, 0x14, STATS_TYPE_BANK0, },
- { "multiple", 4, 0x17, STATS_TYPE_BANK0, },
- { "out_fcs_error", 4, 0x03, STATS_TYPE_BANK0, },
- { "late", 4, 0x1f, STATS_TYPE_BANK0, },
- { "hist_64bytes", 4, 0x08, STATS_TYPE_BANK0, },
- { "hist_65_127bytes", 4, 0x09, STATS_TYPE_BANK0, },
- { "hist_128_255bytes", 4, 0x0a, STATS_TYPE_BANK0, },
- { "hist_256_511bytes", 4, 0x0b, STATS_TYPE_BANK0, },
- { "hist_512_1023bytes", 4, 0x0c, STATS_TYPE_BANK0, },
- { "hist_1024_max_bytes", 4, 0x0d, STATS_TYPE_BANK0, },
- { "sw_in_discards", 4, 0x10, STATS_TYPE_PORT, },
- { "sw_in_filtered", 2, 0x12, STATS_TYPE_PORT, },
- { "sw_out_filtered", 2, 0x13, STATS_TYPE_PORT, },
- { "in_discards", 4, 0x00, STATS_TYPE_BANK1, },
- { "in_filtered", 4, 0x01, STATS_TYPE_BANK1, },
- { "in_accepted", 4, 0x02, STATS_TYPE_BANK1, },
- { "in_bad_accepted", 4, 0x03, STATS_TYPE_BANK1, },
- { "in_good_avb_class_a", 4, 0x04, STATS_TYPE_BANK1, },
- { "in_good_avb_class_b", 4, 0x05, STATS_TYPE_BANK1, },
- { "in_bad_avb_class_a", 4, 0x06, STATS_TYPE_BANK1, },
- { "in_bad_avb_class_b", 4, 0x07, STATS_TYPE_BANK1, },
- { "tcam_counter_0", 4, 0x08, STATS_TYPE_BANK1, },
- { "tcam_counter_1", 4, 0x09, STATS_TYPE_BANK1, },
- { "tcam_counter_2", 4, 0x0a, STATS_TYPE_BANK1, },
- { "tcam_counter_3", 4, 0x0b, STATS_TYPE_BANK1, },
- { "in_da_unknown", 4, 0x0e, STATS_TYPE_BANK1, },
- { "in_management", 4, 0x0f, STATS_TYPE_BANK1, },
- { "out_queue_0", 4, 0x10, STATS_TYPE_BANK1, },
- { "out_queue_1", 4, 0x11, STATS_TYPE_BANK1, },
- { "out_queue_2", 4, 0x12, STATS_TYPE_BANK1, },
- { "out_queue_3", 4, 0x13, STATS_TYPE_BANK1, },
- { "out_queue_4", 4, 0x14, STATS_TYPE_BANK1, },
- { "out_queue_5", 4, 0x15, STATS_TYPE_BANK1, },
- { "out_queue_6", 4, 0x16, STATS_TYPE_BANK1, },
- { "out_queue_7", 4, 0x17, STATS_TYPE_BANK1, },
- { "out_cut_through", 4, 0x18, STATS_TYPE_BANK1, },
- { "out_octets_a", 4, 0x1a, STATS_TYPE_BANK1, },
- { "out_octets_b", 4, 0x1b, STATS_TYPE_BANK1, },
- { "out_management", 4, 0x1f, STATS_TYPE_BANK1, },
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->stats_snapshot(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+#define MV88E6XXX_HW_STAT_MAPPER(_fn) \
+ _fn(in_good_octets, 8, 0x00, STATS_TYPE_BANK0), \
+ _fn(in_bad_octets, 4, 0x02, STATS_TYPE_BANK0), \
+ _fn(in_unicast, 4, 0x04, STATS_TYPE_BANK0), \
+ _fn(in_broadcasts, 4, 0x06, STATS_TYPE_BANK0), \
+ _fn(in_multicasts, 4, 0x07, STATS_TYPE_BANK0), \
+ _fn(in_pause, 4, 0x16, STATS_TYPE_BANK0), \
+ _fn(in_undersize, 4, 0x18, STATS_TYPE_BANK0), \
+ _fn(in_fragments, 4, 0x19, STATS_TYPE_BANK0), \
+ _fn(in_oversize, 4, 0x1a, STATS_TYPE_BANK0), \
+ _fn(in_jabber, 4, 0x1b, STATS_TYPE_BANK0), \
+ _fn(in_rx_error, 4, 0x1c, STATS_TYPE_BANK0), \
+ _fn(in_fcs_error, 4, 0x1d, STATS_TYPE_BANK0), \
+ _fn(out_octets, 8, 0x0e, STATS_TYPE_BANK0), \
+ _fn(out_unicast, 4, 0x10, STATS_TYPE_BANK0), \
+ _fn(out_broadcasts, 4, 0x13, STATS_TYPE_BANK0), \
+ _fn(out_multicasts, 4, 0x12, STATS_TYPE_BANK0), \
+ _fn(out_pause, 4, 0x15, STATS_TYPE_BANK0), \
+ _fn(excessive, 4, 0x11, STATS_TYPE_BANK0), \
+ _fn(collisions, 4, 0x1e, STATS_TYPE_BANK0), \
+ _fn(deferred, 4, 0x05, STATS_TYPE_BANK0), \
+ _fn(single, 4, 0x14, STATS_TYPE_BANK0), \
+ _fn(multiple, 4, 0x17, STATS_TYPE_BANK0), \
+ _fn(out_fcs_error, 4, 0x03, STATS_TYPE_BANK0), \
+ _fn(late, 4, 0x1f, STATS_TYPE_BANK0), \
+ _fn(hist_64bytes, 4, 0x08, STATS_TYPE_BANK0), \
+ _fn(hist_65_127bytes, 4, 0x09, STATS_TYPE_BANK0), \
+ _fn(hist_128_255bytes, 4, 0x0a, STATS_TYPE_BANK0), \
+ _fn(hist_256_511bytes, 4, 0x0b, STATS_TYPE_BANK0), \
+ _fn(hist_512_1023bytes, 4, 0x0c, STATS_TYPE_BANK0), \
+ _fn(hist_1024_max_bytes, 4, 0x0d, STATS_TYPE_BANK0), \
+ _fn(sw_in_discards, 4, 0x10, STATS_TYPE_PORT), \
+ _fn(sw_in_filtered, 2, 0x12, STATS_TYPE_PORT), \
+ _fn(sw_out_filtered, 2, 0x13, STATS_TYPE_PORT), \
+ _fn(in_discards, 4, 0x00, STATS_TYPE_BANK1), \
+ _fn(in_filtered, 4, 0x01, STATS_TYPE_BANK1), \
+ _fn(in_accepted, 4, 0x02, STATS_TYPE_BANK1), \
+ _fn(in_bad_accepted, 4, 0x03, STATS_TYPE_BANK1), \
+ _fn(in_good_avb_class_a, 4, 0x04, STATS_TYPE_BANK1), \
+ _fn(in_good_avb_class_b, 4, 0x05, STATS_TYPE_BANK1), \
+ _fn(in_bad_avb_class_a, 4, 0x06, STATS_TYPE_BANK1), \
+ _fn(in_bad_avb_class_b, 4, 0x07, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_0, 4, 0x08, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_1, 4, 0x09, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_2, 4, 0x0a, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_3, 4, 0x0b, STATS_TYPE_BANK1), \
+ _fn(in_da_unknown, 4, 0x0e, STATS_TYPE_BANK1), \
+ _fn(in_management, 4, 0x0f, STATS_TYPE_BANK1), \
+ _fn(out_queue_0, 4, 0x10, STATS_TYPE_BANK1), \
+ _fn(out_queue_1, 4, 0x11, STATS_TYPE_BANK1), \
+ _fn(out_queue_2, 4, 0x12, STATS_TYPE_BANK1), \
+ _fn(out_queue_3, 4, 0x13, STATS_TYPE_BANK1), \
+ _fn(out_queue_4, 4, 0x14, STATS_TYPE_BANK1), \
+ _fn(out_queue_5, 4, 0x15, STATS_TYPE_BANK1), \
+ _fn(out_queue_6, 4, 0x16, STATS_TYPE_BANK1), \
+ _fn(out_queue_7, 4, 0x17, STATS_TYPE_BANK1), \
+ _fn(out_cut_through, 4, 0x18, STATS_TYPE_BANK1), \
+ _fn(out_octets_a, 4, 0x1a, STATS_TYPE_BANK1), \
+ _fn(out_octets_b, 4, 0x1b, STATS_TYPE_BANK1), \
+ _fn(out_management, 4, 0x1f, STATS_TYPE_BANK1), \
+ /* */
+
+#define MV88E6XXX_HW_STAT_ENTRY(_string, _size, _reg, _type) \
+ { #_string, _size, _reg, _type }
+static const struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+ MV88E6XXX_HW_STAT_MAPPER(MV88E6XXX_HW_STAT_ENTRY)
+};
+
+#define MV88E6XXX_HW_STAT_ENUM(_string, _size, _reg, _type) \
+ MV88E6XXX_HW_STAT_ID_ ## _string
+enum mv88e6xxx_hw_stat_id {
+ MV88E6XXX_HW_STAT_MAPPER(MV88E6XXX_HW_STAT_ENUM)
};
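To make the X-macro above concrete: for the first entry in the list,
MV88E6XXX_HW_STAT_ENTRY expands _fn(in_good_octets, 8, 0x00, STATS_TYPE_BANK0)
to

	{ "in_good_octets", 8, 0x00, STATS_TYPE_BANK0 },

while MV88E6XXX_HW_STAT_ENUM expands the same line to
MV88E6XXX_HW_STAT_ID_in_good_octets, so the stats table and the enum are
generated from a single list and stay index-aligned by construction. The
trailing /* */ line in the mapper exists so that the last entry can keep its
continuation backslash.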
static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_hw_stat *s,
+ const struct mv88e6xxx_hw_stat *s,
int port, u16 bank1_select,
u16 histogram)
{
@@ -922,42 +1153,37 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
return value;
}
-static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data, int types)
+static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data, int types)
{
- struct mv88e6xxx_hw_stat *stat;
- int i, j;
+ const struct mv88e6xxx_hw_stat *stat;
+ int i;
- for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (stat->type & types) {
- memcpy(data + j * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
- j++;
- }
+ if (stat->type & types)
+ ethtool_puts(data, stat->string);
}
-
- return j;
}
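The switch to a uint8_t ** parameter works because ethtool_puts() both copies
the string into the buffer and advances the buffer pointer by ETH_GSTRING_LEN,
which removes the need to return a count. A minimal usage sketch:

	u8 *p = buf;

	ethtool_puts(&p, "first_stat");	/* p advances by ETH_GSTRING_LEN */
	ethtool_puts(&p, "second_stat");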
-static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_PORT);
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
-static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+ mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
}
-static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static void mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t **data)
{
- return mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
+ mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
}
static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
@@ -968,21 +1194,18 @@ static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
"vtu_miss_violation",
};
-static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
+static void mv88e6xxx_atu_vtu_get_strings(uint8_t **data)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
- mv88e6xxx_atu_vtu_stats_strings[i],
- ETH_GSTRING_LEN);
+ ethtool_puts(data, mv88e6xxx_atu_vtu_stats_strings[i]);
}
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int count = 0;
if (stringset != ETH_SS_STATS)
return;
@@ -990,15 +1213,12 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings)
- count = chip->info->ops->stats_get_strings(chip, data);
+ chip->info->ops->stats_get_strings(chip, &data);
- if (chip->info->ops->serdes_get_strings) {
- data += count * ETH_GSTRING_LEN;
- count = chip->info->ops->serdes_get_strings(chip, port, data);
- }
+ if (chip->info->ops->serdes_get_strings)
+ chip->info->ops->serdes_get_strings(chip, port, &data);
- data += count * ETH_GSTRING_LEN;
- mv88e6xxx_atu_vtu_get_strings(data);
+ mv88e6xxx_atu_vtu_get_strings(&data);
mv88e6xxx_reg_unlock(chip);
}
@@ -1006,7 +1226,7 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
int types)
{
- struct mv88e6xxx_hw_stat *stat;
+ const struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
@@ -1065,59 +1285,73 @@ out:
return count;
}
-static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data, int types,
- u16 bank1_select, u16 histogram)
+static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- struct mv88e6xxx_hw_stat *stat;
- int i, j;
-
- for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
- stat = &mv88e6xxx_hw_stats[i];
- if (stat->type & types) {
- mv88e6xxx_reg_lock(chip);
- data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
- bank1_select,
- histogram);
- mv88e6xxx_reg_unlock(chip);
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
+}
- j++;
- }
- }
- return j;
+static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
+{
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
}
-static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_PORT,
- 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
}
-static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
- 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
+ 0);
+ return 1;
}
-static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
- MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ int ret = 0;
+
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
+ if (chip->info->ops->stats_get_stat) {
+ mv88e6xxx_reg_lock(chip);
+ ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
+ mv88e6xxx_reg_unlock(chip);
+ }
+
+ return ret;
}
-static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
- 0);
+ const struct mv88e6xxx_hw_stat *stat;
+ size_t i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ j += mv88e6xxx_stats_get_stat(chip, port, stat, &data[j]);
+ }
+ return j;
}
static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
@@ -1133,10 +1367,9 @@ static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
- int count = 0;
+ size_t count;
- if (chip->info->ops->stats_get_stats)
- count = chip->info->ops->stats_get_stats(chip, port, data);
+ count = mv88e6xxx_stats_get_stats(chip, port, data);
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->serdes_get_stats) {
@@ -1154,16 +1387,90 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int ret;
- mv88e6xxx_reg_lock(chip);
+ ret = mv88e6xxx_stats_snapshot(chip, port);
+ if (ret < 0)
+ return;
+
+ mv88e6xxx_get_stats(chip, port, data);
+}
+
+static void mv88e6xxx_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret;
ret = mv88e6xxx_stats_snapshot(chip, port);
- mv88e6xxx_reg_unlock(chip);
+ if (ret < 0)
+ return;
+
+#define MV88E6XXX_ETH_MAC_STAT_MAP(_id, _member) \
+ mv88e6xxx_stats_get_stat(chip, port, \
+ &mv88e6xxx_hw_stats[MV88E6XXX_HW_STAT_ID_ ## _id], \
+ &mac_stats->stats._member)
+
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_unicast, FramesTransmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(single, SingleCollisionFrames);
+ MV88E6XXX_ETH_MAC_STAT_MAP(multiple, MultipleCollisionFrames);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_unicast, FramesReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_fcs_error, FrameCheckSequenceErrors);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_octets, OctetsTransmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(deferred, FramesWithDeferredXmissions);
+ MV88E6XXX_ETH_MAC_STAT_MAP(late, LateCollisions);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_good_octets, OctetsReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_multicasts, MulticastFramesXmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_broadcasts, BroadcastFramesXmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(excessive, FramesWithExcessiveDeferral);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_multicasts, MulticastFramesReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_broadcasts, BroadcastFramesReceivedOK);
+
+#undef MV88E6XXX_ETH_MAC_STAT_MAP
+
+ mac_stats->stats.FramesTransmittedOK += mac_stats->stats.MulticastFramesXmittedOK;
+ mac_stats->stats.FramesTransmittedOK += mac_stats->stats.BroadcastFramesXmittedOK;
+ mac_stats->stats.FramesReceivedOK += mac_stats->stats.MulticastFramesReceivedOK;
+ mac_stats->stats.FramesReceivedOK += mac_stats->stats.BroadcastFramesReceivedOK;
+}
+
+static void mv88e6xxx_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 65535 },
+ {}
+ };
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret;
+ ret = mv88e6xxx_stats_snapshot(chip, port);
if (ret < 0)
return;
- mv88e6xxx_get_stats(chip, port, data);
+#define MV88E6XXX_RMON_STAT_MAP(_id, _member) \
+ mv88e6xxx_stats_get_stat(chip, port, \
+ &mv88e6xxx_hw_stats[MV88E6XXX_HW_STAT_ID_ ## _id], \
+ &rmon_stats->stats._member)
+
+ MV88E6XXX_RMON_STAT_MAP(in_undersize, undersize_pkts);
+ MV88E6XXX_RMON_STAT_MAP(in_oversize, oversize_pkts);
+ MV88E6XXX_RMON_STAT_MAP(in_fragments, fragments);
+ MV88E6XXX_RMON_STAT_MAP(in_jabber, jabbers);
+ MV88E6XXX_RMON_STAT_MAP(hist_64bytes, hist[0]);
+ MV88E6XXX_RMON_STAT_MAP(hist_65_127bytes, hist[1]);
+ MV88E6XXX_RMON_STAT_MAP(hist_128_255bytes, hist[2]);
+ MV88E6XXX_RMON_STAT_MAP(hist_256_511bytes, hist[3]);
+ MV88E6XXX_RMON_STAT_MAP(hist_512_1023bytes, hist[4]);
+ MV88E6XXX_RMON_STAT_MAP(hist_1024_max_bytes, hist[5]);
+#undef MV88E6XXX_RMON_STAT_MAP
+
+ *ranges = rmon_ranges;
}
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
@@ -1206,15 +1513,8 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
@@ -1225,8 +1525,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
- struct net_device *br;
- struct dsa_port *dp;
+ struct dsa_port *dp, *other_dp;
bool found = false;
u16 pvlan;
@@ -1235,11 +1534,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index == dev && dp->index == port) {
/* dp might be a DSA link or a user port, so it
- * might or might not have a bridge_dev
- * pointer. Use the "found" variable for both
- * cases.
+ * might or might not have a bridge.
+ * Use the "found" variable for both cases.
*/
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1247,13 +1544,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
/* dev is a virtual bridge */
} else {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_num < 0)
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+
+ if (!bridge_num)
continue;
- if (dp->bridge_num + 1 + dst->last_switch != dev)
+ if (bridge_num + dst->last_switch != dev)
continue;
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1269,15 +1567,21 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
pvlan = 0;
- /* Frames from user ports can egress any local DSA links and CPU ports,
- * as well as any local member of their bridge group.
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
*/
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->ds == ds &&
- (dp->type == DSA_PORT_TYPE_CPU ||
- dp->type == DSA_PORT_TYPE_DSA ||
- (br && dp->bridge_dev == br)))
- pvlan |= BIT(dp->index);
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
+ */
+ dsa_switch_for_each_port(other_dp, ds)
+ if (other_dp->type == DSA_PORT_TYPE_CPU ||
+ other_dp->type == DSA_PORT_TYPE_DSA ||
+ dsa_port_bridge_same(dp, other_dp))
+ pvlan |= BIT(other_dp->index);
return pvlan;
}
@@ -1463,15 +1767,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
- * the LAG ID as the port number.
+			 * the LAG ID (one-based), minus one, as the
+			 * (zero-based) port number.
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev);
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
@@ -1504,24 +1809,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
- return;
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1540,6 +1852,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
if (!chip->info->ops->vtu_getnext)
return -EOPNOTSUPP;
+ memset(entry, 0, sizeof(*entry));
+
entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip);
entry->valid = false;
@@ -1551,11 +1865,11 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
return err;
}
-static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
- int (*cb)(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *priv),
- void *priv)
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
{
struct mv88e6xxx_vtu_entry entry = {
.vid = mv88e6xxx_max_vid(chip),
@@ -1591,65 +1905,216 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
-static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *_fid_bitmap)
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
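+	/* chip->fid_bitmap is kept in sync by the VLAN paths below:
+	 * mv88e6xxx_port_vlan_join() sets a FID's bit as it goes into use
+	 * and mv88e6xxx_port_vlan_leave() clears it when the FID is freed,
+	 * replacing the VTU walk the old allocator performed.
+	 */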
- unsigned long *fid_bitmap = _fid_bitmap;
+ *fid = find_first_zero_bit(chip->fid_bitmap, MV88E6XXX_N_FID);
+ if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
+ return -ENOSPC;
- set_bit(entry->fid, fid_bitmap);
- return 0;
+ /* Clear the database */
+ return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
- int i, err;
- u16 fid;
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
- bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
- /* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, &fid);
- if (err)
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+	/* If the SID is zero, it is for a VLAN mapped to the default MSTI.
+	 * mv88e6xxx_stu_setup() made sure that entry is always present, so
+	 * it must not be removed here.
+ *
+ * If the chip lacks STU support, numerically the "sid" variable will
+ * happen to also be zero, but we don't want to rely on that fact, so
+ * we explicitly test that first. In that case, there is also nothing
+ * to do here.
+ */
+ if (!mv88e6xxx_has_stu(chip) || !sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
return err;
+ }
- set_bit(fid, fid_bitmap);
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
}
- /* Set every FID bit used by the VLAN entries */
- return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
+ return -ENOENT;
}
-static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
{
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- int err;
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
- err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ err = mv88e6xxx_sid_get(chip, sid);
if (err)
- return err;
+ goto err;
- /* The reset value 0x000 is used to indicate that multiple address
- * databases are not needed. Return the next positive available.
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+	/* The bridge starts out with all ports in the disabled state.
+	 * But an STU state of disabled means to go by the port-global
+	 * state. So we set each user port's initial state to blocking,
+	 * to match the bridge's behavior.
*/
- *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
- if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
- return -ENOSPC;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
- /* Clear the database */
- return mv88e6xxx_g1_atu_flush(chip, *fid, true);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
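A minimal sketch of how the two helpers above pair up (caller shown for
illustration only; in this patch the real user is mv88e6xxx_vlan_msti_set()
further down):

	u8 sid;
	int err;

	err = mv88e6xxx_mst_get(chip, br, msti, &sid);	/* ref++, STU entry loaded */
	if (err)
		return err;
	/* ... point the VTU entry's SID at the MSTI ... */
	err = mv88e6xxx_mst_put(chip, sid);	/* ref--, entry purged at zero */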
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
}
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
- int i, err;
+ int err;
/* DSA and CPU ports have to be members of multiple vlans */
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
@@ -1659,27 +2124,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vlan.valid)
return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *other_br;
- if (!dsa_to_port(ds, i)->slave)
- continue;
-
- if (vlan.member[i] ==
+ if (vlan.member[other_dp->index] ==
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_same(dp, other_dp))
break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
+ other_br = dsa_port_bridge_dev_get(other_dp);
+ if (!other_br)
continue;
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ port, vlan.vid, other_dp->index, netdev_name(other_br));
return -EOPNOTSUPP;
}
@@ -1689,13 +2149,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct mv88e6xxx_port *p = &chip->ports[port];
u16 pvid = MV88E6XXX_VID_STANDALONE;
bool drop_untagged = false;
int err;
- if (dp->bridge_dev) {
- if (br_vlan_enabled(dp->bridge_dev)) {
+ if (br) {
+ if (br_vlan_enabled(br)) {
pvid = p->bridge_pvid.vid;
drop_untagged = !p->bridge_pvid.valid;
} else {
@@ -1758,13 +2219,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -1775,7 +2234,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -1785,14 +2244,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
+
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2128,6 +2612,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -2159,6 +2646,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
port, vid);
}
+ /* Record FID used in SW FID map */
+ bitmap_set(chip->fid_bitmap, vlan.fid, 1);
+
return 0;
}
@@ -2188,7 +2678,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
- /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
+ /* net/dsa/user.c will call dsa_port_vlan_add() for the affected port
* and then the CPU port. Do not warn for duplicates for the CPU port.
*/
warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
@@ -2260,6 +2750,15 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+
+ /* Record FID freed in SW FID map */
+ bitmap_clear(chip->fid_bitmap, vlan.fid, 1);
+ }
+
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
@@ -2274,6 +2773,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -2298,8 +2804,75 @@ unlock:
return err;
}
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2307,13 +2880,21 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2413,7 +2994,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
@@ -2421,7 +3002,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
int err;
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev == br) {
+ if (dsa_port_offloads_bridge(dp, &bridge)) {
if (dp->ds == ds) {
/* This is a local bridge group member,
* remap its Port VLAN Map.
@@ -2444,15 +3025,34 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
return 0;
}
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map it in the PVT. The first dst->last_switch elements are
+ * taken by physical switches, so start from beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ unsigned int bridge_num)
+{
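+	/* bridge_num is one-based here (dsa_port_bridge_num_get() returns
+	 * 0 for "no bridge"), which is why the "+ 1" used by the removed
+	 * variant of this helper further down is no longer needed.
+	 */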
+ u8 dev = bridge_num + ds->dst->last_switch;
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return mv88e6xxx_pvt_map(chip, dev, 0);
+}
+
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_bridge_map(chip, br);
+ err = mv88e6xxx_bridge_map(chip, bridge);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
if (err)
goto unlock;
@@ -2460,6 +3060,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ if (mv88e6xxx_has_pvt(chip)) {
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+ if (err)
+ goto unlock;
+
+ *tx_fwd_offload = true;
+ }
+
unlock:
mv88e6xxx_reg_unlock(chip);
@@ -2467,17 +3075,27 @@ unlock:
}
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_bridge_map(chip, br) ||
+ if (bridge.tx_fwd_offload &&
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+
+ if (mv88e6xxx_bridge_map(chip, bridge) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
@@ -2489,7 +3107,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2499,6 +3118,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, sw_index, port);
+ err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2506,7 +3126,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2514,49 +3134,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
return;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_pvt_map(chip, sw_index, port))
+ if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
-/* Treat the software bridge as a virtual single-port switch behind the
- * CPU and map in the PVT. First dst->last_switch elements are taken by
- * physical switches, so start from beyond that range.
- */
-static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
- int bridge_num)
-{
- u8 dev = bridge_num + ds->dst->last_switch + 1;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_pvt_map(chip, dev, 0);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
-}
-
-static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- int err;
-
- err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
- if (err) {
- dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
- ERR_PTR(err));
- }
-}
-
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
@@ -2568,15 +3151,35 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
+ int err;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
+ /* If the switch has just been reset and not yet completed
+ * loading EEPROM, the reset may interrupt the I2C transaction
+		 * mid-byte, causing the first EEPROM read after the reset to
+		 * come from the wrong location, leaving the switch booted into
+		 * the wrong mode and inoperable.
+ * For this reason, switch families with EEPROM support
+ * generally wait for EEPROM loads to complete as their pre-
+ * and post-reset handlers.
+ */
+ if (chip->info->ops->hardware_reset_pre) {
+ err = chip->info->ops->hardware_reset_pre(chip);
+ if (err)
+ dev_err(chip->dev, "pre-reset error: %d\n", err);
+ }
+
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->hardware_reset_post) {
+ err = chip->info->ops->hardware_reset_post(chip);
+ if (err)
+ dev_err(chip->dev, "post-reset error: %d\n", err);
+ }
}
}
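As a hypothetical illustration (the hook names exist in this patch, but which
helper a given family plugs in is an assumption here), a family that must wait
out the EEPROM load on both sides of the reset could set:

	.hardware_reset_pre = mv88e6xxx_g1_wait_eeprom_done,
	.hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,

mirroring the unconditional mv88e6xxx_g1_wait_eeprom_done() call that this
change removes.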
@@ -2700,102 +3303,6 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
return 0;
}
-static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
-{
- struct mv88e6xxx_port *mvp = dev_id;
- struct mv88e6xxx_chip *chip = mvp->chip;
- irqreturn_t ret = IRQ_NONE;
- int port = mvp->port;
- int lane;
-
- mv88e6xxx_reg_lock(chip);
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0)
- ret = mv88e6xxx_serdes_irq_status(chip, port, lane);
- mv88e6xxx_reg_unlock(chip);
-
- return ret;
-}
-
-static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- struct mv88e6xxx_port *dev_id = &chip->ports[port];
- unsigned int irq;
- int err;
-
- /* Nothing to request if this SERDES port has no IRQ */
- irq = mv88e6xxx_serdes_irq_mapping(chip, port);
- if (!irq)
- return 0;
-
- snprintf(dev_id->serdes_irq_name, sizeof(dev_id->serdes_irq_name),
- "mv88e6xxx-%s-serdes-%d", dev_name(chip->dev), port);
-
- /* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
- mv88e6xxx_reg_unlock(chip);
- err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
- IRQF_ONESHOT, dev_id->serdes_irq_name,
- dev_id);
- mv88e6xxx_reg_lock(chip);
- if (err)
- return err;
-
- dev_id->serdes_irq = irq;
-
- return mv88e6xxx_serdes_irq_enable(chip, port, lane);
-}
-
-static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- struct mv88e6xxx_port *dev_id = &chip->ports[port];
- unsigned int irq = dev_id->serdes_irq;
- int err;
-
- /* Nothing to free if no IRQ has been requested */
- if (!irq)
- return 0;
-
- err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
-
- /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */
- mv88e6xxx_reg_unlock(chip);
- free_irq(irq, dev_id);
- mv88e6xxx_reg_lock(chip);
-
- dev_id->serdes_irq = 0;
-
- return err;
-}
-
-static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
- bool on)
-{
- int lane;
- int err;
-
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane < 0)
- return 0;
-
- if (on) {
- err = mv88e6xxx_serdes_power_up(chip, port, lane);
- if (err)
- return err;
-
- err = mv88e6xxx_serdes_irq_request(chip, port, lane);
- } else {
- err = mv88e6xxx_serdes_irq_free(chip, port, lane);
- if (err)
- return err;
-
- err = mv88e6xxx_serdes_power_down(chip, port, lane);
- }
-
- return err;
-}
-
static int mv88e6xxx_set_egress_port(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
int port)
@@ -2857,27 +3364,49 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
+ struct fwnode_handle *ports_fwnode;
+ struct fwnode_handle *port_fwnode;
struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_port *p;
+ struct dsa_port *dp;
+ int tx_amp;
int err;
u16 reg;
+ u32 val;
- chip->ports[port].chip = chip;
- chip->ports[port].port = port;
+ p = &chip->ports[port];
+ p->chip = chip;
+ p->port = port;
+
+ /* Look up corresponding fwnode if any */
+ ports_fwnode = device_get_named_child_node(chip->dev, "ethernet-ports");
+ if (!ports_fwnode)
+ ports_fwnode = device_get_named_child_node(chip->dev, "ports");
+ if (ports_fwnode) {
+ fwnode_for_each_child_node(ports_fwnode, port_fwnode) {
+ if (fwnode_property_read_u32(port_fwnode, "reg", &val))
+ continue;
+ if (val == port) {
+ p->fwnode = port_fwnode;
+ p->fiber = fwnode_property_present(port_fwnode, "sfp");
+ break;
+ }
+ }
+ fwnode_handle_put(ports_fwnode);
+ } else {
+ dev_dbg(chip->dev, "no ethernet ports node defined for the device\n");
+ }
- /* MAC Forcing register: don't force link, speed, duplex or flow control
- * state to any particular values on physical ports, but force the CPU
- * port and all DSA ports to their maximum bandwidth and full duplex.
- */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- SPEED_MAX, DUPLEX_FULL,
- PAUSE_OFF,
- PHY_INTERFACE_MODE_NA);
- else
- err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
- SPEED_UNFORCED, DUPLEX_UNFORCED,
- PAUSE_ON,
- PHY_INTERFACE_MODE_NA);
+ if (chip->info->ops->port_setup_leds) {
+ err = chip->info->ops->port_setup_leds(chip, port);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
+ SPEED_UNFORCED, DUPLEX_UNFORCED,
+ PAUSE_ON, PHY_INTERFACE_MODE_NA);
if (err)
return err;
@@ -2895,9 +3424,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
- reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
- MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ /* Forward any IPv4 IGMP or IPv6 MLD frames received
+ * by a USER port to the CPU port to allow snooping.
+ */
+ if (dsa_is_user_port(ds, port))
+ reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
@@ -2911,12 +3445,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
- * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
- * untagged frames on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't send a
- * copy of all transmitted/received frames on this port to the CPU.
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
*/
- err = mv88e6xxx_port_set_map_da(chip, port);
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
@@ -2924,8 +3459,44 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+	 * advantage of VLAN policy on chips that support it.
+ */
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
if (err)
return err;
@@ -2938,7 +3509,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
@@ -3011,6 +3582,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ dp = dsa_to_port(ds, port);
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -3037,7 +3625,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
- return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return ETH_DATA_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -3045,45 +3633,31 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ /* For families where we don't know how to alter the MTU,
+	 * just accept any value up to ETH_DATA_LEN.
+ */
+ if (!chip->info->ops->port_set_jumbo_size &&
+ !chip->info->ops->set_max_frame_size) {
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+
+ return 0;
+ }
+
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
new_mtu += EDSA_HLEN;
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
- else if (chip->info->ops->set_max_frame_size)
+ else if (chip->info->ops->set_max_frame_size &&
+ dsa_is_cpu_port(ds, port))
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
- else
- if (new_mtu > 1522)
- ret = -EINVAL;
mv88e6xxx_reg_unlock(chip);
return ret;
}
-static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_serdes_power(chip, port, true);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_serdes_power(chip, port, false))
- dev_err(chip->dev, "failed to power off SERDES\n");
- mv88e6xxx_reg_unlock(chip);
-}
-
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -3111,6 +3685,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ u16 dummy;
+ int err;
+
+ /* Workaround for erratum
+ * 3.3 RGMII timing may be out of spec when transmit delay is enabled
+ */
+ err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
+}
+
/* Check if the erratum has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -3160,11 +3749,225 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+ [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
+};
+
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
+ }
+
+ return err ? err : val;
+}
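+
+To make the PHYSID2 fix-up above concrete: the 6-bit model field of MII_PHYSID2 sits in bits 9:4 (hence the 0x3f0 mask), and the switch product number is shifted down four bits to land there. A self-contained sketch, where the raw register value is a made-up placeholder and 0x3900 is the 88E6390 product number from the driver's headers:
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            /* Hypothetical raw MII_PHYSID2 read: OUI bits present,
+             * model (bits 9:4) zero, as on PHYs without a model number.
+             */
+            uint16_t val = 0x0c00;
+            uint16_t prod_id = 0x3900; /* MV88E6XXX_PORT_SWITCH_ID_PROD_6390 */
+
+            if (!(val & 0x3f0))          /* model number missing? */
+                    val |= prod_id >> 4; /* fold product ID into model field */
+
+            /* Prints: PHYSID2 = 0x0f90, model = 0x39 */
+            printf("PHYSID2 = 0x%04x, model = 0x%02x\n",
+                   val, (val >> 4) & 0x3f);
+            return 0;
+    }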
+
+static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -ENODEV;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err ? err : val;
+}
+
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np,
+ bool external)
+{
+ static int index;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
+ int err;
+
+ if (external) {
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->family == MV88E6XXX_FAMILY_6393)
+ err = mv88e6393x_g2_scratch_gpio_set_smi(chip, true);
+ else
+ err = mv88e6390_g2_scratch_gpio_set_smi(chip, true);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ return err;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
+ if (!bus)
+ return -ENOMEM;
+
+ mdio_bus = bus->priv;
+ mdio_bus->bus = bus;
+ mdio_bus->chip = chip;
+ INIT_LIST_HEAD(&mdio_bus->list);
+ mdio_bus->external = external;
+
+ if (np) {
+ bus->name = np->full_name;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
+ } else {
+ bus->name = "mv88e6xxx SMI";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
+ }
+
+ bus->read = mv88e6xxx_mdio_read;
+ bus->write = mv88e6xxx_mdio_write;
+ bus->read_c45 = mv88e6xxx_mdio_read_c45;
+ bus->write_c45 = mv88e6xxx_mdio_write_c45;
+ bus->parent = chip->dev;
+ bus->phy_mask = ~GENMASK(chip->info->phy_base_addr +
+ mv88e6xxx_num_ports(chip) - 1,
+ chip->info->phy_base_addr);
+
+ if (!external) {
+ err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
+ if (err)
+ goto out;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+ goto out;
+ }
+
+ if (external)
+ list_add_tail(&mdio_bus->list, &chip->mdios);
+ else
+ list_add(&mdio_bus->list, &chip->mdios);
+
+ return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
+}
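+
+The phy_mask expression above restricts bus scanning to the switch's internal PHY address window: GENMASK() sets the bits for the in-range addresses, and the complement marks every other address as "do not probe". A userspace sketch with assumed values (base address 0x10, six ports; the real numbers come from chip->info):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    /* Stand-in for the kernel's GENMASK(h, l) on 32-bit values */
+    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
+
+    int main(void)
+    {
+            unsigned int phy_base_addr = 0x10; /* assumed */
+            unsigned int num_ports = 6;        /* assumed */
+            uint32_t phy_mask;
+
+            /* A set bit in phy_mask means "skip this MDIO address" */
+            phy_mask = ~GENMASK(phy_base_addr + num_ports - 1, phy_base_addr);
+
+            printf("phy_mask = 0x%08x\n", phy_mask); /* 0xffc0ffff */
+            return 0;
+    }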
+
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
+ struct mii_bus *bus;
+
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
+ bus = mdio_bus->bus;
+
+ if (!mdio_bus->external)
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+ }
+}
+
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip)
+{
+ struct device_node *np = chip->dev->of_node;
+ struct device_node *child;
+ int err;
+
+ /* Always register one mdio bus for the internal/default mdio
+ * bus. This may be represented in the device tree, but is
+ * optional.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
+ if (err)
+ return err;
+
+ /* Walk the device tree, and see if there are any other nodes
+ * which say they are compatible with the external mdio
+ * bus.
+ */
+ for_each_available_child_of_node(np, child) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
+ err = mv88e6xxx_mdio_register(chip, child, true);
+ if (err) {
+ mv88e6xxx_mdios_unregister(chip);
+ of_node_put(child);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
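+
+One subtlety worth spelling out: mv88e6xxx_mdio_register() inserts the internal bus with list_add() (head of chip->mdios) and external buses with list_add_tail(), so the internal bus is always the first entry. mv88e6xxx_default_mdio_bus(), used further down to populate ds->user_mii_bus, presumably just returns that first entry; a sketch under that assumption (the real helper lives elsewhere in chip.c):
+
+    static struct mii_bus *example_default_mdio_bus(struct mv88e6xxx_chip *chip)
+    {
+            struct mv88e6xxx_mdio_bus *mdio_bus;
+
+            /* Internal bus first, by construction of chip->mdios */
+            mdio_bus = list_first_entry_or_null(&chip->mdios,
+                                                struct mv88e6xxx_mdio_bus,
+                                                list);
+            return mdio_bus ? mdio_bus->bus : NULL;
+    }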
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
+ mv88e6xxx_mdios_unregister(chip);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3174,8 +3977,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
int err;
int i;
+ err = mv88e6xxx_mdios_register(chip);
+ if (err)
+ return err;
+
chip->ds = ds;
- ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ ds->user_mii_bus = mv88e6xxx_default_mdio_bus(chip);
/* Since virtual bridges are mapped in the PVT, the number we support
* depends on the physical switch topology. We need to let DSA figure
@@ -3183,8 +3990,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
* time.
*/
if (mv88e6xxx_has_pvt(chip))
- ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
- ds->dst->last_switch - 1;
+ ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
mv88e6xxx_reg_lock(chip);
@@ -3209,6 +4016,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3293,7 +4107,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- return err;
+ goto out_hwtstamp;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -3302,7 +4116,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- return err;
+ goto out_hwtstamp;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -3318,178 +4132,38 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
+out_hwtstamp:
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
+ mv88e6xxx_mdios_unregister(chip);
return err;
}
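
The reworked error handling above follows the usual kernel unwind idiom: each acquisition gets a label that releases it and everything acquired before it, so a failure at any step backs out in reverse order (here ending at out_hwtstamp, which also unregisters the MDIO buses registered at the top of setup). A generic, self-contained sketch of the pattern; all names are illustrative stubs:

    #include <errno.h>

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int acquire_c(void) { return -EIO; }
    static void release_a(void) { }
    static void release_b(void) { }

    static int example_setup(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    return err;

            err = acquire_b();
            if (err)
                    goto out_a;

            err = acquire_c();
            if (err)
                    goto out_b;

            return 0;

    out_b:
            release_b(); /* undo acquire_b() */
    out_a:
            release_a(); /* undo acquire_a() */
            return err;
    }

    int main(void)
    {
            return example_setup() ? 1 : 0;
    }
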
static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
{
- return mv88e6xxx_setup_devlink_regions_port(ds, port);
-}
-
-static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
-{
- mv88e6xxx_teardown_devlink_regions_port(ds, port);
-}
-
-/* prod_id for switch families which do not have a PHY model number */
-static const u16 family_prod_id_table[] = {
- [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
- [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
- [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
-};
-
-static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
-{
- struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
- struct mv88e6xxx_chip *chip = mdio_bus->chip;
- u16 prod_id;
- u16 val;
- int err;
-
- if (!chip->info->ops->phy_read)
- return -EOPNOTSUPP;
-
- mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
- mv88e6xxx_reg_unlock(chip);
-
- /* Some internal PHYs don't have a model number. */
- if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
- chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
- prod_id = family_prod_id_table[chip->info->family];
- if (prod_id)
- val |= prod_id >> 4;
- }
-
- return err ? err : val;
-}
-
-static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
-{
- struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
- struct mv88e6xxx_chip *chip = mdio_bus->chip;
- int err;
-
- if (!chip->info->ops->phy_write)
- return -EOPNOTSUPP;
-
- mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
- struct device_node *np,
- bool external)
-{
- static int index;
- struct mv88e6xxx_mdio_bus *mdio_bus;
- struct mii_bus *bus;
+ struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (external) {
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
- mv88e6xxx_reg_unlock(chip);
-
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_init) {
+ err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err)
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
- if (!bus)
- return -ENOMEM;
-
- mdio_bus = bus->priv;
- mdio_bus->bus = bus;
- mdio_bus->chip = chip;
- INIT_LIST_HEAD(&mdio_bus->list);
- mdio_bus->external = external;
-
- if (np) {
- bus->name = np->full_name;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
- } else {
- bus->name = "mv88e6xxx SMI";
- snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
- }
-
- bus->read = mv88e6xxx_mdio_read;
- bus->write = mv88e6xxx_mdio_write;
- bus->parent = chip->dev;
-
- if (!external) {
- err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
- if (err)
- return err;
- }
-
- err = of_mdiobus_register(bus, np);
- if (err) {
- dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
- mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
- }
-
- if (external)
- list_add_tail(&mdio_bus->list, &chip->mdios);
- else
- list_add(&mdio_bus->list, &chip->mdios);
-
- return 0;
-}
-
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
- struct mv88e6xxx_mdio_bus *mdio_bus;
- struct mii_bus *bus;
-
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
- bus = mdio_bus->bus;
-
- if (!mdio_bus->external)
- mv88e6xxx_g2_irq_mdio_free(chip, bus);
-
- mdiobus_unregister(bus);
- }
+ return mv88e6xxx_setup_devlink_regions_port(ds, port);
}
-static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
- struct device_node *np)
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
{
- struct device_node *child;
- int err;
-
- /* Always register one mdio bus for the internal/default mdio
- * bus. This maybe represented in the device tree, but is
- * optional.
- */
- child = of_get_child_by_name(np, "mdio");
- err = mv88e6xxx_mdio_register(chip, child, false);
- if (err)
- return err;
+ struct mv88e6xxx_chip *chip = ds->priv;
- /* Walk the device tree, and see if there are any other nodes
- * which say they are compatible with the external mdio
- * bus.
- */
- for_each_available_child_of_node(np, child) {
- if (of_device_is_compatible(
- child, "marvell,mv88e6xxx-mdio-external")) {
- err = mv88e6xxx_mdio_register(chip, child, true);
- if (err) {
- mv88e6xxx_mdios_unregister(chip);
- of_node_put(child);
- return err;
- }
- }
- }
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
- return 0;
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_teardown)
+ chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
@@ -3551,6 +4225,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3565,7 +4240,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3577,7 +4252,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
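
A recurring substitution in this and the following ops tables is .phylink_validate becoming .phylink_get_caps: instead of validating a proposed mode, the driver now reports what the port can do and lets phylink perform the validation. A minimal sketch of the shape of such a callback; the capability bits and interface modes are placeholders, not what any particular family actually reports:

    /* Sketch against the driver's internal types (chip.h) and
     * <linux/phylink.h>; illustrative values only.
     */
    static void example_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
                                         struct phylink_config *config)
    {
            /* Speeds, duplexes and pause the MAC supports */
            config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
                                       MAC_1000FD;

            /* PHY interface modes usable on this port */
            __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
            __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
    }
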
@@ -3601,17 +4278,15 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3621,12 +4296,15 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3641,23 +4319,21 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6097_serdes_irq_enable,
- .serdes_irq_status = mv88e6097_serdes_irq_status,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3667,8 +4343,10 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -3683,7 +4361,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3694,7 +4372,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3724,7 +4404,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3735,7 +4415,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -3746,8 +4426,10 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -3771,35 +4453,32 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6341_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3808,12 +4487,15 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3828,7 +4510,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3839,9 +4521,11 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3864,7 +4548,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3875,9 +4559,11 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3886,8 +4572,10 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -3908,7 +4596,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -3919,7 +4607,9 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3930,8 +4620,10 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -3948,33 +4640,33 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -3983,8 +4675,10 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4005,7 +4699,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4016,7 +4710,9 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -4027,8 +4723,10 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4045,36 +4743,35 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4099,21 +4796,19 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
- .serdes_power = mv88e6185_serdes_power,
- .serdes_get_lane = mv88e6185_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .pcs_ops = &mv88e6185_pcs_ops,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4124,8 +4819,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4148,34 +4845,31 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4185,8 +4879,10 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4209,34 +4905,31 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4246,8 +4939,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4268,35 +4963,32 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -4307,8 +4999,10 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4325,38 +5019,37 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -4367,8 +5060,10 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4385,18 +5080,21 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6250_stats_get_sset_count,
.stats_get_strings = mv88e6250_stats_get_strings,
- .stats_get_stats = mv88e6250_stats_get_stats,
+ .stats_get_stat = mv88e6250_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
+ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6250_ptp_ops,
- .phylink_validate = mv88e6065_phylink_validate,
+ .ptp_ops = &mv88e6352_ptp_ops,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -4406,8 +5104,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4429,52 +5129,54 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .ptp_ops = &mv88e6390_ptp_ops,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4490,35 +5192,44 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6320_stats_get_stats,
+ .stats_get_stat = mv88e6320_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4534,17 +5245,22 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6320_stats_get_stats,
+ .stats_get_stat = mv88e6320_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -4555,8 +5271,10 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4580,28 +5298,24 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6341_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4610,7 +5324,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -4619,8 +5334,10 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4641,7 +5358,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4652,7 +5369,9 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -4661,8 +5380,10 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4683,7 +5404,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4694,9 +5415,11 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -4707,8 +5430,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4725,32 +5450,29 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_leds = mv88e6xxx_port_setup_leds,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_get_lane = mv88e6352_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6352_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
- .serdes_power = mv88e6352_serdes_power,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6352_serdes_irq_enable,
- .serdes_irq_status = mv88e6352_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4759,7 +5481,9 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .phylink_validate = mv88e6352_phylink_validate,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
+ .pcs_ops = &mv88e6352_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -4769,8 +5493,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4794,37 +5520,34 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390_serdes_get_lane,
- /* Check status register pause & lpa register */
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -4834,8 +5557,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4859,27 +5584,24 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6390_serdes_irq_enable,
- .serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
@@ -4887,19 +5609,21 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .ptp_ops = &mv88e6390_ptp_ops,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
+ .pcs_ops = &mv88e6390_pcs_ops,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
/* MV88E6XXX_FAMILY_6393 */
- .setup_errata = mv88e6393x_serdes_setup_errata,
.irl_init_all = mv88e6390_g2_irl_init_all,
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4924,38 +5648,82 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
/* .set_cpu_port is missing because this family does not support a
 * global CPU port, only a per-port CPU port, which is set via the
 * .port_set_upstream_port method.
 */
.set_egress_port = mv88e6393x_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
+ .watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6393x_serdes_power,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
- .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
- .serdes_pcs_config = mv88e6390_serdes_pcs_config,
- .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
- .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
- .serdes_irq_enable = mv88e6393x_serdes_irq_enable,
- .serdes_irq_status = mv88e6393x_serdes_irq_status,
/* TODO: serdes stats */
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6393x_phylink_validate,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
+ .pcs_ops = &mv88e6393x_pcs_ops,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ [MV88E6020] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6020,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6020",
+ .num_databases = 64,
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
+ .num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
+ .max_vid = 4095,
+ .port_base_addr = 0x8,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0xf,
+ .global2_addr = 0x7,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .ops = &mv88e6250_ops,
+ },
+
+ [MV88E6071] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6071,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6071",
+ .num_databases = 64,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6085] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6085,
.family = MV88E6XXX_FAMILY_6097,
@@ -4965,6 +5733,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -4972,6 +5741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -4993,6 +5763,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5007,6 +5778,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5014,6 +5786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5030,6 +5803,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5037,6 +5811,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5059,6 +5834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5068,20 +5844,22 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5097,6 +5875,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5104,6 +5883,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5121,6 +5901,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5128,6 +5909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5144,6 +5926,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5151,6 +5934,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5168,6 +5952,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5175,6 +5960,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5191,6 +5977,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5198,6 +5985,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5215,6 +6003,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5222,6 +6011,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5244,6 +6034,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5260,6 +6051,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5267,6 +6059,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -5283,6 +6076,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5290,6 +6084,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5305,6 +6100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5312,6 +6108,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5325,8 +6122,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6191X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 9,
+ .num_internal_phys = 8,
+ .internal_phys_offset = 1,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5334,6 +6133,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5347,8 +6147,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6193X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 9,
+ .num_internal_phys = 8,
+ .internal_phys_offset = 1,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5356,6 +6158,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5383,6 +6186,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -5399,6 +6203,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5406,6 +6211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5429,6 +6235,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -5444,6 +6251,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5451,6 +6259,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5465,9 +6274,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5475,6 +6286,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5490,9 +6302,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5500,7 +6314,9 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
+ .pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
@@ -5511,20 +6327,22 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5541,6 +6359,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5548,6 +6367,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5564,6 +6384,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5571,6 +6392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5588,6 +6410,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5595,6 +6418,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5602,6 +6426,33 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ptp_support = true,
.ops = &mv88e6352_ops,
},
+ [MV88E6361] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6361,
+ .family = MV88E6XXX_FAMILY_6393,
+ .name = "Marvell 88E6361",
+ .num_databases = 4096,
+ .num_macs = 16384,
+ .num_ports = 11,
+ /* Ports 1, 2 and 8 are not routed */
+ .invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
+ .num_internal_phys = 5,
+ .internal_phys_offset = 3,
+ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+ .global1_addr = 0x1b,
+ .global2_addr = 0x1c,
+ .age_time_coeff = 3750,
+ .g1_irqs = 10,
+ .g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
+ .atu_move_port_mask = 0x1f,
+ .pvt = true,
+ .multi_chip = true,
+ .ptp_support = true,
+ .ops = &mv88e6393x_ops,
+ },
[MV88E6390] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
.family = MV88E6XXX_FAMILY_6390,
@@ -5612,6 +6463,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5619,6 +6471,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5636,6 +6489,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5643,6 +6497,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5657,8 +6512,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6393X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 9,
+ .num_internal_phys = 8,
+ .internal_phys_offset = 1,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5666,6 +6523,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -5714,6 +6572,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
return 0;
}
+static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
+ struct mdio_device *mdiodev)
+{
+ int err;
+
+ /* dual_chip takes precedence over single/multi-chip modes */
+ if (chip->info->dual_chip)
+ return -EINVAL;
+
+ /* If the MDIO address is 16, indicating the first port address of a
+ * switch (e.g. mv88e6*41) in single chip addressing mode, the device
+ * may be configured in single chip addressing mode. Set up SMI access
+ * in single chip addressing mode and attempt to detect the switch
+ * model; if detection fails, the device is not configured in single
+ * chip addressing mode.
+ */
+ if (mdiodev->addr != 16)
+ return -EINVAL;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_detect(chip);
+}
+
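As a hedged usage sketch (mirroring the probe fallback added later in this patch), callers try single-chip detection first and only fall back to address-specific SMI init if it fails:

	err = mv88e6xxx_single_chip_detect(chip, mdiodev);
	if (err) {
		/* Not in single chip addressing mode; use the MDIO address. */
		err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
		if (!err)
			err = mv88e6xxx_detect(chip);
	}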
static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
{
struct mv88e6xxx_chip *chip;
@@ -5727,6 +6611,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
return chip;
}
@@ -5740,11 +6625,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->tag_protocol;
}
-static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct mv88e6xxx_chip *chip = ds->priv;
enum dsa_tag_protocol old_protocol;
+ struct dsa_port *cpu_dp;
int err;
switch (proto) {
@@ -5769,17 +6655,31 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
chip->tag_protocol = proto;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_setup_port_mode(chip, port);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ if (err) {
+ mv88e6xxx_reg_unlock(chip);
+ goto unwind;
+ }
+ }
mv88e6xxx_reg_unlock(chip);
- if (err)
- chip->tag_protocol = old_protocol;
+ return 0;
+
+unwind:
+ chip->tag_protocol = old_protocol;
+
+ mv88e6xxx_reg_lock(chip);
+ dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
+ mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5787,13 +6687,21 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5807,7 +6715,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -5881,7 +6790,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB))
return -EINVAL;
ops = chip->info->ops;
@@ -5900,7 +6809,7 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err = -EOPNOTSUPP;
+ int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -5939,6 +6848,19 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_MAB) {
+ bool mab = !!(flags.val & BR_PORT_MAB);
+
+ mv88e6xxx_port_set_mab(chip, port, mab);
+ }
+
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
out:
mv88e6xxx_reg_unlock(chip);
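For orientation, a hedged sketch of how these new flag bits reach the op above; the values are illustrative, but struct switchdev_brport_flags, BR_PORT_LOCKED, and BR_PORT_MAB are the kernel types and flags used in this hunk:

	/* Illustrative only: lock the port and enable MAC Authentication
	 * Bypass in one call, as the bridge layer would on
	 * "bridge link set dev <port> locked on mab on".
	 */
	struct switchdev_brport_flags flags = {
		.mask = BR_PORT_LOCKED | BR_PORT_MAB,
		.val  = BR_PORT_LOCKED | BR_PORT_MAB,
	};

	err = mv88e6xxx_port_bridge_flags(ds, port, flags, extack);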
@@ -5946,32 +6868,40 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -5980,20 +6910,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
@@ -6038,8 +6969,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
- struct net_device *lag;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@@ -6048,8 +6979,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (!dp->lag_dev || dp->ds != ds)
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@@ -6062,7 +6993,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag = dsa_lag_dev(ds->dst, id);
+ lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
@@ -6098,7 +7029,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag)
+ struct dsa_lag lag)
{
int err;
@@ -6122,16 +7053,18 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@@ -6154,7 +7087,7 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
@@ -6179,13 +7112,14 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -6202,7 +7136,7 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
@@ -6214,6 +7148,15 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
return err_sync ? : err_pvt;
}
+static const struct phylink_mac_ops mv88e6xxx_phylink_mac_ops = {
+ .mac_select_pcs = mv88e6xxx_mac_select_pcs,
+ .mac_prepare = mv88e6xxx_mac_prepare,
+ .mac_config = mv88e6xxx_mac_config,
+ .mac_finish = mv88e6xxx_mac_finish,
+ .mac_link_down = mv88e6xxx_mac_link_down,
+ .mac_link_up = mv88e6xxx_mac_link_up,
+};
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
@@ -6221,20 +7164,15 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
- .phylink_validate = mv88e6xxx_validate,
- .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
- .phylink_mac_config = mv88e6xxx_mac_config,
- .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
- .phylink_mac_link_down = mv88e6xxx_mac_link_down,
- .phylink_mac_link_up = mv88e6xxx_mac_link_up,
+ .phylink_get_caps = mv88e6xxx_get_caps,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats,
+ .get_rmon_stats = mv88e6xxx_get_rmon_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
- .port_enable = mv88e6xxx_port_enable,
- .port_disable = mv88e6xxx_port_disable,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
@@ -6249,15 +7187,18 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_add = mv88e6xxx_port_mdb_add,
- .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
.port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
@@ -6276,8 +7217,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
- .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -6294,6 +7233,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
+ ds->phylink_mac_ops = &mv88e6xxx_phylink_mac_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
@@ -6384,21 +7324,27 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
- if (err)
- goto out;
-
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(chip->reset)) {
err = PTR_ERR(chip->reset);
goto out;
}
if (chip->reset)
- usleep_range(1000, 2000);
+ usleep_range(10000, 20000);
- err = mv88e6xxx_detect(chip);
- if (err)
- goto out;
+ /* Detect whether the device is configured in single chip addressing
+ * mode; otherwise fall back to address-specific SMI init and detection.
+ */
+ err = mv88e6xxx_single_chip_detect(chip, mdiodev);
+ if (err) {
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto out;
+ }
if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
chip->tag_protocol = DSA_TAG_PROTO_EDSA;
@@ -6419,13 +7365,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
err = mv88e6xxx_switch_reset(chip);
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (np) {
chip->irq = of_irq_get(np, 0);
if (chip->irq == -EPROBE_DEFER) {
err = chip->irq;
- goto out;
+ goto out_phy;
}
}
@@ -6444,7 +7390,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out;
+ goto out_phy;
if (chip->info->g2_irqs > 0) {
err = mv88e6xxx_g2_irq_setup(chip);
@@ -6460,18 +7406,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out_g1_atu_prob_irq;
- err = mv88e6xxx_mdios_register(chip, np);
- if (err)
- goto out_g1_vtu_prob_irq;
-
err = mv88e6xxx_register_switch(chip);
if (err)
- goto out_mdio;
+ goto out_g1_vtu_prob_irq;
return 0;
-out_mdio:
- mv88e6xxx_mdios_unregister(chip);
out_g1_vtu_prob_irq:
mv88e6xxx_g1_vtu_prob_irq_free(chip);
out_g1_atu_prob_irq:
@@ -6484,6 +7424,8 @@ out_g1_irq:
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+out_phy:
+ mv88e6xxx_phy_destroy(chip);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -6501,14 +7443,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
chip = ds->priv;
- if (chip->info->ptp_support) {
- mv88e6xxx_hwtstamp_free(chip);
- mv88e6xxx_ptp_free(chip);
- }
-
- mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
- mv88e6xxx_mdios_unregister(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
mv88e6xxx_g1_atu_prob_irq_free(chip);
@@ -6521,7 +7456,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
else
mv88e6xxx_irq_poll_free(chip);
- dev_set_drvdata(&mdiodev->dev, NULL);
+ mv88e6xxx_phy_destroy(chip);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8271b8aa7b71..2f211e55cb47 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -13,13 +13,16 @@
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
+#include <linux/leds.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <net/dsa.h>
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
#define MV88E6XXX_FID_STANDALONE 0
#define MV88E6XXX_FID_BRIDGED 1
@@ -53,6 +56,8 @@ enum mv88e6xxx_frame_mode {
/* List of supported models */
enum mv88e6xxx_model {
+ MV88E6020,
+ MV88E6071,
MV88E6085,
MV88E6095,
MV88E6097,
@@ -81,6 +86,7 @@ enum mv88e6xxx_model {
MV88E6350,
MV88E6351,
MV88E6352,
+ MV88E6361,
MV88E6390,
MV88E6390X,
MV88E6393X,
@@ -93,13 +99,13 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
- MV88E6XXX_FAMILY_6250, /* 6220 6250 */
+ MV88E6XXX_FAMILY_6250, /* 6220 6250 6020 6071 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
- MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6393X */
+ MV88E6XXX_FAMILY_6393, /* 6191X 6193X 6361 6393X */
};
/**
@@ -130,6 +136,7 @@ struct mv88e6xxx_info {
unsigned int num_internal_phys;
unsigned int num_gpio;
unsigned int max_vid;
+ unsigned int max_sid;
unsigned int port_base_addr;
unsigned int phy_base_addr;
unsigned int global1_addr;
@@ -137,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
@@ -165,6 +173,11 @@ struct mv88e6xxx_info {
/* Supports PTP */
bool ptp_support;
+
+ /* Internal PHY start index. 0 means the internal PHY range starts at
+ * port 0, 1 means it starts at port 1, etc. For example, the 88E6393X
+ * has eight internal PHYs starting at port 1, so it uses offset 1.
+ */
+ unsigned int internal_phys_offset;
};
struct mv88e6xxx_atu_entry {
@@ -179,7 +192,14 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
+ bool policy;
u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
u8 state[DSA_MAX_PORTS];
};
@@ -188,6 +208,8 @@ struct mv88e6xxx_irq_ops;
struct mv88e6xxx_gpio_ops;
struct mv88e6xxx_avb_ops;
struct mv88e6xxx_ptp_ops;
+struct mv88e6xxx_pcs_ops;
+struct mv88e6xxx_cc_coeffs;
struct mv88e6xxx_irq {
u16 masked;
@@ -219,7 +241,7 @@ struct mv88e6xxx_port_hwtstamp {
u16 tx_seq_id;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
enum mv88e6xxx_policy_mapping {
@@ -257,6 +279,7 @@ struct mv88e6xxx_vlan {
struct mv88e6xxx_port {
struct mv88e6xxx_chip *chip;
int port;
+ struct fwnode_handle *fwnode;
struct mv88e6xxx_vlan bridge_pvid;
u64 serdes_stats[2];
u64 atu_member_violation;
@@ -268,9 +291,16 @@ struct mv88e6xxx_port {
u8 cmode;
bool mirror_ingress;
bool mirror_egress;
- unsigned int serdes_irq;
- char serdes_irq_name[64];
struct devlink_region *region;
+ void *pcs_private;
+
+ /* LED related information */
+ bool fiber;
+ struct led_classdev led0;
+ struct led_classdev led1;
+
+ /* MAC Authentication Bypass (MAB) control flag */
+ bool mab;
};
enum mv88e6xxx_region_id {
@@ -278,6 +308,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
@@ -287,6 +318,27 @@ struct mv88e6xxx_region_priv {
enum mv88e6xxx_region_id id;
};
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
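A brief hedged sketch of how this refcounted bridge/MSTI-to-SID list is typically searched; the helper name is hypothetical, and the real lookup lives in the chip.c half of this series:

	static struct mv88e6xxx_mst *example_mst_find(struct mv88e6xxx_chip *chip,
						      struct net_device *br, u16 msti)
	{
		struct mv88e6xxx_mst *mst;

		/* chip->msts is the list initialized in mv88e6xxx_alloc_chip() */
		list_for_each_entry(mst, &chip->msts, node)
			if (mst->br == br && mst->msti == msti)
				return mst;

		return NULL;
	}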
+
+#define STATS_TYPE_PORT BIT(0)
+#define STATS_TYPE_BANK0 BIT(1)
+#define STATS_TYPE_BANK1 BIT(2)
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ size_t size;
+ int reg;
+ int type;
+};
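For illustration, a hypothetical table entry using these fields; the string, size, and register values are assumptions, not taken from this patch:

	static const struct mv88e6xxx_hw_stat example_hw_stat = {
		.string	= "in_good_octets",	/* ethtool counter name, assumed */
		.size	= 8,			/* 64-bit counter */
		.reg	= 0x00,			/* stats unit register, assumed */
		.type	= STATS_TYPE_BANK0 | STATS_TYPE_PORT,
	};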
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -366,13 +418,12 @@ struct mv88e6xxx_chip {
struct cyclecounter tstamp_cc;
struct timecounter tstamp_tc;
struct delayed_work overflow_work;
+ const struct mv88e6xxx_cc_coeffs *cc_coeffs;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct delayed_work tai_event_work;
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
- u16 trig_config;
- u16 evcap_config;
u16 enable_count;
/* Current ingress and egress monitor ports */
@@ -387,11 +438,18 @@ struct mv88e6xxx_chip {
/* devlink regions */
struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
+
+ /* FID map */
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
};
struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_mdio_bus {
@@ -427,6 +485,13 @@ struct mv88e6xxx_ops {
struct mii_bus *bus,
int addr, int reg, u16 val);
+ int (*phy_read_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+ int (*phy_write_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
+
/* Priority Override Table operations */
int (*pot_clear)(struct mv88e6xxx_chip *chip);
@@ -434,6 +499,12 @@ struct mv88e6xxx_ops {
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ /* Additional handlers to run before and after hard reset, to make sure
+ * that the switch and EEPROM are in a good state.
+ */
+ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
+ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
+
/* Switch Software Reset */
int (*reset)(struct mv88e6xxx_chip *chip);
@@ -464,14 +535,13 @@ struct mv88e6xxx_ops {
int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
int pause);
-#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
#define DUPLEX_UNFORCED -2
/* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ * Use SPEED_UNFORCED for normal detection.
*
* Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
* or DUPLEX_UNFORCED for normal duplex detection.
@@ -480,7 +550,8 @@ struct mv88e6xxx_ops {
int speed, int duplex);
/* What interface mode should be used for maximum speed? */
- phy_interface_t (*port_max_speed_mode)(int port);
+ phy_interface_t (*port_max_speed_mode)(struct mv88e6xxx_chip *chip,
+ int port);
int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
@@ -513,6 +584,9 @@ struct mv88e6xxx_ops {
phy_interface_t mode);
int (*port_get_cmode)(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+ /* LED control */
+ int (*port_setup_leds)(struct mv88e6xxx_chip *chip, int port);
+
/* Some devices have a per-port register indicating which upstream port
 * this port should forward to.
 */
@@ -531,9 +605,10 @@ struct mv88e6xxx_ops {
/* Return the number of strings describing statistics */
int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
- int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
- int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+ void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t **data);
+ size_t (*stats_get_stat)(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int (*set_egress_port)(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
@@ -548,44 +623,29 @@ struct mv88e6xxx_ops {
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
- /* Power on/off a SERDES interface */
- int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up);
-
/* SERDES lane mapping */
int (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
- int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
- int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
- int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
- int lane);
- int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
-
/* SERDES interrupt handling */
unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
int port);
- int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
- irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port,
- int lane);
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
- int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
- uint8_t *data);
- int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+ int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
+ size_t (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
/* SERDES registers for ethtool */
int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port);
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -596,6 +656,12 @@ struct mv88e6xxx_ops {
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
/* GPIO operations */
const struct mv88e6xxx_gpio_ops *gpio_ops;
@@ -609,9 +675,10 @@ struct mv88e6xxx_ops {
const struct mv88e6xxx_ptp_ops *ptp_ops;
/* Phylink */
- void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
+
+ const struct mv88e6xxx_pcs_ops *pcs_ops;
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
@@ -663,7 +730,7 @@ struct mv88e6xxx_avb_ops {
};
struct mv88e6xxx_ptp_ops {
- u64 (*clock_read)(const struct cyclecounter *cc);
+ u64 (*clock_read)(struct cyclecounter *cc);
int (*ptp_enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin,
@@ -673,28 +740,29 @@ struct mv88e6xxx_ptp_ops {
int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
int (*global_enable)(struct mv88e6xxx_chip *chip);
int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int (*set_ptp_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int n_ext_ts;
int arr0_sts_reg;
int arr1_sts_reg;
int dep_sts_reg;
u32 rx_filters;
- u32 cc_shift;
- u32 cc_mult;
- u32 cc_mult_num;
- u32 cc_mult_dem;
};
-#define STATS_TYPE_PORT BIT(0)
-#define STATS_TYPE_BANK0 BIT(1)
-#define STATS_TYPE_BANK1 BIT(2)
+struct mv88e6xxx_pcs_ops {
+ int (*pcs_init)(struct mv88e6xxx_chip *chip, int port);
+ void (*pcs_teardown)(struct mv88e6xxx_chip *chip, int port);
+ struct phylink_pcs *(*pcs_select)(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
-struct mv88e6xxx_hw_stat {
- char string[ETH_GSTRING_LEN];
- size_t size;
- int reg;
- int type;
};
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
{
return chip->info->pvt;
@@ -725,6 +793,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
return chip->info->max_vid;
}
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
@@ -740,6 +813,12 @@ static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int po
return (chip->info->invalid_port_mask & BIT(port)) != 0;
}
+static inline void mv88e6xxx_port_set_mab(struct mv88e6xxx_chip *chip,
+ int port, bool mab)
+{
+ chip->ports[port].mab = mab;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
@@ -758,6 +837,10 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
-int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv);
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 381068395c63..da69e0b85879 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -374,32 +374,20 @@ static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
u8 **data)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_devlink_atu_entry *table;
struct mv88e6xxx_chip *chip = ds->priv;
- int fid = -1, count, err;
+ int fid = -1, err = 0, count = 0;
- table = kmalloc_array(mv88e6xxx_num_databases(chip),
- sizeof(struct mv88e6xxx_devlink_atu_entry),
- GFP_KERNEL);
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
if (!table)
return -ENOMEM;
- memset(table, 0, mv88e6xxx_num_databases(chip) *
- sizeof(struct mv88e6xxx_devlink_atu_entry));
-
- count = 0;
-
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_fid_map(chip, fid_bitmap);
- if (err) {
- kfree(table);
- goto out;
- }
-
while (1) {
- fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+ fid = find_next_bit(chip->fid_bitmap, MV88E6XXX_N_FID, fid + 1);
if (fid == MV88E6XXX_N_FID)
break;
@@ -503,6 +491,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst packs the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
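Since the snapshot above copies the raw register contents verbatim and defers parsing to the consumer, decoding happens outside the kernel. A minimal userspace sketch, assuming the region blob has already been fetched (e.g. via a devlink region read) into a buffer; the struct mirrors mv88e6xxx_devlink_stu_entry, and the helper name and buffer origin are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	struct stu_entry {		/* mirrors mv88e6xxx_devlink_stu_entry */
		uint16_t sid;
		uint16_t vid;		/* only the Valid bit is meaningful */
		uint16_t data[3];
		uint16_t resvd;
	};

	/* Hypothetical helper: dump every valid entry of a fetched snapshot */
	static void dump_stu(const struct stu_entry *tbl, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (tbl[i].vid)	/* entry was loaded as valid */
				printf("sid %u: data %04x %04x %04x\n",
				       tbl[i].sid, tbl[i].data[0],
				       tbl[i].data[1], tbl[i].data[2]);
	}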
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -575,7 +642,7 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
.id = MV88E6XXX_REGION_GLOBAL1,
};
-static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global1_ops = {
.name = "global1",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
@@ -586,26 +653,32 @@ static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
.id = MV88E6XXX_REGION_GLOBAL2,
};
-static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_global2_ops = {
.name = "global2",
.snapshot = mv88e6xxx_region_global_snapshot,
.destructor = kfree,
.priv = &mv88e6xxx_region_global2_priv,
};
-static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.name = "atu",
.snapshot = mv88e6xxx_region_atu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.name = "vtu",
.snapshot = mv88e6xxx_region_vtu_snapshot,
.destructor = kfree,
};
-static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
+static const struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
+static const struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
.destructor = kfree,
@@ -618,13 +691,13 @@ static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
};
struct mv88e6xxx_region {
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
u64 size;
bool (*cond)(struct mv88e6xxx_chip *chip);
};
-static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+static const struct mv88e6xxx_region mv88e6xxx_regions[] = {
[MV88E6XXX_REGION_GLOBAL1] = {
.ops = &mv88e6xxx_region_global1_ops,
.size = 32 * sizeof(u16)
@@ -640,6 +713,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -653,7 +731,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
- dsa_devlink_region_destroy(chip->regions[i]);
+ if (chip->regions[i])
+ dsa_devlink_region_destroy(chip->regions[i]);
}
void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
@@ -684,7 +763,7 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
struct mv88e6xxx_chip *chip = ds->priv;
- struct devlink_region_ops *ops;
+ const struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
int i, j;
@@ -706,6 +785,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
@@ -727,11 +810,6 @@ int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = devlink_info_driver_name_put(req, "mv88e6xxx");
- if (err)
- return err;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 5848112036b0..9820cd596757 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,35 +75,93 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
+{
+ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
+ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
+}
+
+/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
+static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
{
- const unsigned long timeout = jiffies + 1 * HZ;
u16 val;
int err;
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err < 0) {
+ dev_err(chip->dev, "Error reading status\n");
+ return err;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, and so MDIO read returns
+ * 0xffff. Differentiate between that, and waiting for
+ * the EEPROM to be done by bit 0 being set.
+ */
+ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* As the EEInt (EEPROM done) flag clears on read of the status register, this
+ * function must be called directly after a hard reset or EEPROM ReLoad request,
+ * or the done condition may have been missed.
+ */
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ int ret;
+
/* Wait up to 1 second for the switch to finish reading the
* EEPROM.
*/
while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err) {
- dev_err(chip->dev, "Error reading status");
- return;
- }
-
- /* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
- if (val != 0xffff &&
- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
- return;
-
- usleep_range(1000, 2000);
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
}
dev_err(chip->dev, "Timeout waiting for EEPROM done");
+ return -ETIMEDOUT;
+}
+
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* Pre-reset, we don't know the state of the switch - when
+ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
+ * the switch is actually busy reading the EEPROM, or because
+ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
+ * status register read already.
+ *
+ * To account for the latter case, trigger another EEPROM reload for
+ * another chance at seeing the done flag.
+ */
+ ret = mv88e6250_g1_eeprom_reload(chip);
+ if (ret)
+ return ret;
+
+ return mv88e6xxx_g1_wait_eeprom_done(chip);
}
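A minimal sketch of the intended call ordering on the 88E6250 family; the wrapper name is hypothetical, but both wait helpers and the reset function are declared in global1.h below. The prereset variant tolerates a done flag that was already consumed by an earlier status read, while the plain variant must directly follow the reset so the flag is not missed:

	static int example_6250_reset_sequence(struct mv88e6xxx_chip *chip)
	{
		int err;

		/* May trigger an EEPROM reload to re-arm the done flag */
		err = mv88e6250_g1_wait_eeprom_done_prereset(chip);
		if (err)
			return err;

		err = mv88e6250_g1_reset(chip);
		if (err)
			return err;

		/* Must run immediately after reset; EEInt clears on read */
		return mv88e6xxx_g1_wait_eeprom_done(chip);
	}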
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
@@ -403,6 +461,18 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;
+
+ /* Use the default high priority for PTP frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
@@ -481,8 +551,7 @@ int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
- MV88E6390_G1_CTL2_HIST_MODE_RX |
- MV88E6390_G1_CTL2_HIST_MODE_TX);
+ MV88E6390_G1_CTL2_HIST_MODE_RX);
}
int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
@@ -510,7 +579,7 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
if (err)
return err;
- val |= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+ val |= MV88E6XXX_G1_STATS_OP_HIST_RX;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
@@ -525,7 +594,7 @@ int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY |
MV88E6XXX_G1_STATS_OP_CAPTURE_PORT |
- MV88E6XXX_G1_STATS_OP_HIST_RX_TX | port);
+ MV88E6XXX_G1_STATS_OP_HIST_RX | port);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 4f3dbb015f77..3dbb7a1b8fe1 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -46,6 +46,7 @@
/* Offset 0x02: VTU FID Register */
#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
#define MV88E6352_G1_VTU_FID_MASK 0x0fff
/* Offset 0x03: VTU SID Register */
@@ -213,6 +214,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST 0x3200
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
@@ -280,7 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
@@ -302,6 +305,7 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
@@ -347,6 +351,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 40bd67a5c8e9..c47f068f56b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -12,6 +12,8 @@
#include "chip.h"
#include "global1.h"
+#include "switchdev.h"
+#include "trace.h"
/* Offset 0x01: ATU FID Register */
@@ -114,6 +116,19 @@ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
+static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY |
+ MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
{
u16 val;
@@ -159,6 +174,41 @@ int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
}
+static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ u16 val = 0, upper = 0, op = 0;
+ int err = -EOPNOTSUPP;
+
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
+ if (err)
+ return err;
+ val &= 0xfff;
+ } else {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
+ if (err)
+ return err;
+ if (mv88e6xxx_num_databases(chip) > 64) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
+ &upper);
+ if (err)
+ return err;
+
+ upper = (upper >> 8) & 0x00f0;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ upper = (op >> 4) & 0x30;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ val = (op & 0xf) | upper;
+ }
+ *fid = val;
+
+ return err;
+}
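To make the bit scatter above concrete, here is a worked example for a chip with more than 64 (but at most 256) databases, where DBNum[7:4] live in ATU Control bits 15:12 and DBNum[3:0] in ATU Operation bits 3:0; the register values are purely illustrative:

	u16 atu_ctl = 0x3000;			/* bits 15:12 = 0x3 */
	u16 atu_op  = 0x0005;			/* bits 3:0   = 0x5 */
	u16 upper   = (atu_ctl >> 8) & 0x00f0;	/* 0x0030 */
	u16 fid     = (atu_op & 0xf) | upper;	/* 0x0035 -> DBNum 53 */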
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
@@ -353,64 +403,70 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
- int spid;
- int err;
- u16 val;
+ int err, spid;
+ u16 val, fid;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_op(chip, 0,
- MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
if (err)
- goto out;
+ goto out_unlock;
+
+ err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
+ if (err)
+ goto out_unlock;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
- spid = entry.state;
+ mv88e6xxx_reg_unlock(chip);
- if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU age out violation for %pM\n",
- entry.mac);
- }
+ spid = entry.state;
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU member violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
+ trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU miss violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
+ trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
chip->ports[spid].atu_miss_violation++;
+
+ if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) {
+ err = mv88e6xxx_handle_miss_violation(chip, spid,
+ &entry, fid);
+ if (err)
+ goto out;
+ }
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU full violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
- chip->ports[spid].atu_full_violation++;
+ trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
+ if (spid < ARRAY_SIZE(chip->ports))
+ chip->ports[spid].atu_full_violation++;
}
- mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
-out:
+out_unlock:
mv88e6xxx_reg_unlock(chip);
+out:
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ae12c981923e..b524f27a2f0d 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -13,6 +13,7 @@
#include "chip.h"
#include "global1.h"
+#include "trace.h"
/* Offset 0x02: VTU FID Register */
@@ -27,7 +28,7 @@ static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
-
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
@@ -36,13 +37,15 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
@@ -51,15 +54,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
- u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
@@ -88,7 +90,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool *valid, u16 *vid)
{
u16 val;
int err;
@@ -97,25 +99,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->vid = val & 0xfff;
+ if (vid) {
+ *vid = val & 0xfff;
- if (val & MV88E6390_G1_VTU_VID_PAGE)
- entry->vid |= 0x1000;
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
- entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool valid, u16 vid)
{
- u16 val = entry->vid & 0xfff;
+ u16 val = vid & 0xfff;
- if (entry->vid & 0x1000)
+ if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
- if (entry->valid)
+ if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -144,7 +149,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3];
int err;
@@ -157,36 +162,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
- entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
- }
-
- return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 regs[3];
- int err;
- int i;
-
- err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
- if (err)
- return err;
-
- /* Extract PortState data */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int state_offset = (i % 4) * 4 + 2;
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
- entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
@@ -196,8 +185,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
- regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
- regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
@@ -265,48 +257,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
/* VLAN Translation Unit Operations */
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
- if (err)
- return err;
-
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *vtu)
-{
- struct mv88e6xxx_vtu_entry stu;
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
- if (err)
- return err;
-
- stu.sid = vtu->sid - 1;
-
- err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
- if (err)
- return err;
-
- if (stu.sid != vtu->sid || !stu.valid)
- return -EINVAL;
-
- return 0;
-}
-
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -324,7 +274,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
@@ -333,7 +283,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -347,11 +297,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
@@ -381,7 +327,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
@@ -389,12 +335,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -417,16 +358,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -444,12 +380,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
@@ -476,27 +412,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write MemberTag and PortState data */
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
@@ -514,41 +444,116 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write PortState data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- /* Write MemberTag data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ /* As part of the VTU flush, refresh FID map */
+ bitmap_zero(chip->fid_bitmap, MV88E6XXX_N_FID);
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active SID, the STU GetNext operation can be
+ * started again without setting the SID register, since it already
+ * contains the last SID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
- /* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+ return 0;
}
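A minimal walk sketch using the convention above; it mirrors the loop in the devlink STU snapshot earlier in this diff. Start at the highest SID with the entry marked invalid, and iterate until the returned SID wraps:

	struct mv88e6xxx_stu_entry entry = {
		.sid = mv88e6xxx_max_sid(chip),
		.valid = false,
	};
	int err;

	do {
		err = mv88e6xxx_g1_stu_getnext(chip, &entry);
		if (err || !entry.valid)
			break;
		/* consume entry.sid here (per-port state is filled in
		 * by the chip-specific getnext wrappers below)
		 */
	} while (entry.sid < mv88e6xxx_max_sid(chip));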
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
int err;
@@ -556,16 +561,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
- struct mv88e6xxx_vtu_entry entry;
+ u16 val, vid;
int spid;
int err;
- u16 val;
mv88e6xxx_reg_lock(chip);
@@ -577,21 +625,19 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
- dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
- entry.vid, spid);
+ trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
- dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
- entry.vid, spid);
+ trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_miss_violation++;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index fa65ecd9cb85..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
@@ -739,20 +739,18 @@ static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 *data)
+static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 *data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad,
data);
}
@@ -771,51 +769,65 @@ static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 data)
+static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad,
data);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 *val)
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
- val);
-
return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 val)
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
- val);
+ return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg,
+ val);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
val);
}
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg,
+ val);
+}
+
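For contrast with the removed code above: C45 accesses used to be funneled through the combined entry points with the device address packed into the register argument (devad in bits 20:16, flagged by MII_ADDR_C45, as the deleted "(reg >> 16) & 0x1f" shows). A sketch of old versus new call sites, with an illustrative register number:

	/* old convention (removed above): devad packed into 'reg'
	 *
	 *	reg = MII_ADDR_C45 | (MDIO_MMD_VEND1 << 16) | 0x8000;
	 *	err = mv88e6xxx_g2_smi_phy_read(chip, bus, addr, reg, &val);
	 */

	/* new convention: devad travels as its own parameter */
	err = mv88e6xxx_g2_smi_phy_read_c45(chip, bus, addr, MDIO_MMD_VEND1,
					    0x8000, &val);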
/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
@@ -931,6 +943,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
.irq_free = mv88e6390_watchdog_free,
};
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ mv88e6390_watchdog_action(chip, irq);
+
+ /* Fix for clearing the force WD event bit.
+ * Unreleased erratum on mv88e6393x.
+ */
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+ return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+ .irq_action = mv88e6393x_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
@@ -1122,8 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_add_simple(
- chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
@@ -1164,31 +1196,22 @@ out:
int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
- int phy, irq, err, err_phy;
+ int phy_start = chip->info->internal_phys_offset;
+ int phy_end = chip->info->internal_phys_offset +
+ chip->info->num_internal_phys;
+ int phy, irq;
- for (phy = 0; phy < chip->info->num_internal_phys; phy++) {
+ for (phy = phy_start; phy < phy_end; phy++) {
irq = irq_find_mapping(chip->g2_irq.domain, phy);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
+
bus->irq[chip->info->phy_base_addr + phy] = irq;
}
return 0;
-out:
- err_phy = phy;
-
- for (phy = 0; phy < err_phy; phy++)
- irq_dispose_mapping(bus->irq[phy]);
-
- return err;
}
void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
- int phy;
-
- for (phy = 0; phy < chip->info->num_internal_phys; phy++)
- irq_dispose_mapping(bus->irq[phy]);
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index f3e27573a386..82f9b410de0b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,9 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
@@ -312,12 +314,18 @@ int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
@@ -357,10 +365,12 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
@@ -368,8 +378,11 @@ extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index eda710062933..53a6d3ed63b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
* @chip: chip private data
* @pin: gpio index
*
- * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ * Return: 0 for output, 1 for input.
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6390_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
@@ -248,7 +248,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
@@ -289,3 +289,62 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
+
+/**
+ * mv88e6393x_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * MV88E6191X/6193X/6393X GPIO pins 9 and 10 can be configured as an
+ * external SMI interface or as regular GPIOs.
+ *
+ * They do, however, have a different register layout than the existing
+ * function.
+ */
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ if (external)
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
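Because the return value is tri-state (negative errno, 0, or 1), callers cannot treat it as a plain bool. A hedged caller sketch; the surrounding probe context is hypothetical:

	int has_serdes;

	has_serdes = mv88e6352_g2_scratch_port_has_serdes(chip, port);
	if (has_serdes < 0)
		return has_serdes;	/* register access failed */
	if (has_serdes) {
		/* port 4 or 5 is strapped for a serdes; set up PCS here */
	}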
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 8f74ffc7a279..6e6472a3b75a 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -64,7 +64,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct mv88e6xxx_ptp_ops *ptp_ops;
struct mv88e6xxx_chip *chip;
@@ -89,7 +89,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
@@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
*/
clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_enable = false;
@@ -173,42 +169,38 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = ps->tstamp_config;
+
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp,
@@ -305,7 +297,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
@@ -578,7 +570,7 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
}
/* Set the ethertype of L2 PTP messages */
- err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_ETHERTYPE, ETH_P_1588);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index cf7fb6d660b1..c359821d5a6e 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -111,9 +111,10 @@
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg);
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -121,8 +122,9 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
@@ -132,14 +134,17 @@ int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
@@ -157,7 +162,7 @@ static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
}
static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/leds.c b/drivers/net/dsa/mv88e6xxx/leds.c
new file mode 100644
index 000000000000..ab3bc645da56
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/leds.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/bitfield.h>
+#include <linux/leds.h>
+#include <linux/property.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "port.h"
+
+/* Offset 0x16: LED control */
+
+static int mv88e6xxx_port_led_write(struct mv88e6xxx_chip *chip, int port, u16 reg)
+{
+ reg |= MV88E6XXX_PORT_LED_CONTROL_UPDATE;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, reg);
+}
+
+static int mv88e6xxx_port_led_read(struct mv88e6xxx_chip *chip, int port,
+ u16 ptr, u16 *val)
+{
+ int err;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_LED_CONTROL, ptr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_LED_CONTROL, val);
+ if (err)
+ return err;
+
+ *val &= 0x3ff;
+
+ return 0;
+}
+
+static int mv88e6xxx_led_brightness_set(struct mv88e6xxx_port *p, int led,
+ int brightness)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ if (brightness) {
+ /* Selector 0x0f == Force LED ON */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELF;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELF;
+ } else {
+ /* Selector 0x0e == Force LED OFF */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE;
+ }
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led0_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_brightness_set(p, 0, brightness);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int mv88e6xxx_led1_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_brightness_set(p, 1, brightness);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
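A hypothetical registration sketch showing how the two blocking setters above would typically be wired into a port's led_classdev instances; the led0/led1 fields come from struct mv88e6xxx_port (see the container_of() uses above), while fwnode parsing and error unwinding are omitted:

	p->led0.max_brightness = 1;
	p->led0.brightness_set_blocking = mv88e6xxx_led0_brightness_set_blocking;
	err = devm_led_classdev_register(chip->dev, &p->led0);
	if (err)
		return err;

	p->led1.max_brightness = 1;
	p->led1.brightness_set_blocking = mv88e6xxx_led1_brightness_set_blocking;
	return devm_led_classdev_register(chip->dev, &p->led1);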
+struct mv88e6xxx_led_hwconfig {
+ int led;
+ u8 portmask;
+ unsigned long rules;
+ bool fiber;
+ bool blink_activity;
+ u16 selector;
+};
+
+/* The following is a lookup table to check which rules we can support on a
+ * certain LED, given restrictions such as some rules only working with fiber
+ * (SFP) connections and some blinking on activity by default.
+ */
+#define MV88E6XXX_PORTS_0_3 (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define MV88E6XXX_PORTS_4_5 (BIT(4) | BIT(5))
+#define MV88E6XXX_PORT_4 BIT(4)
+#define MV88E6XXX_PORT_5 BIT(5)
+
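A hypothetical matcher sketch for consuming the table below: pick the first entry whose LED index matches, whose port mask covers the port, and whose rule set equals the requested trigger rules; the chosen entry's selector would then be programmed through the LED control register:

	static const struct mv88e6xxx_led_hwconfig *
	example_led_match(int led, int port, unsigned long rules)
	{
		const struct mv88e6xxx_led_hwconfig *cfg;
		int i;

		for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) {
			cfg = &mv88e6352_led_hwconfigs[i];
			if (cfg->led == led && (cfg->portmask & BIT(port)) &&
			    cfg->rules == rules)
				return cfg;
		}

		return NULL;
	}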
+/* Entries are listed in selector order.
+ *
+ * These configurations vary across switch families; list a separate
+ * table per family here.
+ */
+static const struct mv88e6xxx_led_hwconfig mv88e6352_led_hwconfigs[] = {
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_4_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .fiber = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_4,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORT_5,
+ .rules = BIT(TRIGGER_NETDEV_LINK),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_10),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELA,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELA,
+ },
+ {
+ .led = 0,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED0_SELB,
+ },
+ {
+ .led = 1,
+ .portmask = MV88E6XXX_PORTS_0_3,
+ .rules = BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_1000),
+ .blink_activity = true,
+ .selector = MV88E6XXX_PORT_LED_CONTROL_LED1_SELB,
+ },
+};
+
+/* mv88e6xxx_led_match_selector() - look up the appropriate LED mode selector
+ * @p: port state container
+ * @led: LED number, 0 or 1
+ * @blink_activity: blink the LED (usually blink on indicated activity)
+ * @fiber: the link is connected to fiber such as SFP
+ * @rules: LED status flags from the LED classdev core
+ * @selector: the matched selector is OR'ed into this parameter
+ */
+static int mv88e6xxx_led_match_selector(struct mv88e6xxx_port *p, int led, bool blink_activity,
+ bool fiber, unsigned long rules, u16 *selector)
+{
+ const struct mv88e6xxx_led_hwconfig *conf;
+ int i;
+
+ /* No rules means we turn the LED off */
+ if (!rules) {
+ if (led == 1)
+ *selector |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELE;
+ else
+ *selector |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELE;
+ return 0;
+ }
+
+ /* TODO: these rules are for the MV88E6352; when adding other
+ * families, make sure to select the table that matches the
+ * specific switch family.
+ */
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) {
+ conf = &mv88e6352_led_hwconfigs[i];
+
+ if (conf->led != led)
+ continue;
+
+ if (!(conf->portmask & BIT(p->port)))
+ continue;
+
+ if (conf->blink_activity != blink_activity)
+ continue;
+
+ if (conf->fiber != fiber)
+ continue;
+
+ if (conf->rules == rules) {
+ dev_dbg(p->chip->dev, "port%d LED %d set selector %04x for rules %08lx\n",
+ p->port, led, conf->selector, rules);
+ *selector |= conf->selector;
+ return 0;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* mv88e6xxx_led_match_rule() - find Linux netdev rules from a selector value
+ * @p: port state container
+ * @selector: the selector value from the LED activity register
+ * @led: LED number, 0 or 1
+ * @rules: Linux netdev activity rules found from the selector
+ */
+static int
+mv88e6xxx_led_match_rule(struct mv88e6xxx_port *p, u16 selector, int led, unsigned long *rules)
+{
+ const struct mv88e6xxx_led_hwconfig *conf;
+ int i;
+
+ /* Find the selector in the table, we just look for the right selector
+ * and ignore if the activity has special properties such as blinking
+ * or is fiber-only.
+ */
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_led_hwconfigs); i++) {
+ conf = &mv88e6352_led_hwconfigs[i];
+
+ if (conf->led != led)
+ continue;
+
+ if (!(conf->portmask & BIT(p->port)))
+ continue;
+
+ if (conf->selector == selector) {
+ dev_dbg(p->chip->dev, "port%d LED %d has selector %04x, rules %08lx\n",
+ p->port, led, selector, conf->rules);
+ *rules = conf->rules;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* mv88e6xxx_led_get_selector() - get the appropriate LED mode selector
+ * @p: port state container
+ * @led: LED number, 0 or 1
+ * @fiber: the link is connected to fiber such as SFP
+ * @rules: LED status flags from the LED classdev core
+ * @selector: the matched selector is OR'ed into this parameter
+ */
+static int mv88e6xxx_led_get_selector(struct mv88e6xxx_port *p, int led,
+ bool fiber, unsigned long rules, u16 *selector)
+{
+ int err;
+
+ /* First try to locate a trigger with a solid indicator (such as
+ * the LED being on for a 1000Mbps link); failing that, make a
+ * second sweep for something suitable with a trigger that blinks
+ * on activity.
+ */
+ err = mv88e6xxx_led_match_selector(p, led, false, fiber, rules, selector);
+ if (err)
+ return mv88e6xxx_led_match_selector(p, led, true, fiber, rules, selector);
+
+ return 0;
+}
+
+/* Sets up the hardware blinking period */
+static int mv88e6xxx_led_set_blinking_period(struct mv88e6xxx_port *p, int led,
+ unsigned long delay_on, unsigned long delay_off)
+{
+ unsigned long period;
+ u16 reg;
+
+ period = delay_on + delay_off;
+
+ reg = 0;
+
+ switch (period) {
+ case 21:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS;
+ break;
+ case 42:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS;
+ break;
+ case 84:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS;
+ break;
+ case 168:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS;
+ break;
+ case 336:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS;
+ break;
+ case 672:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS;
+ break;
+ default:
+ /* Fall back to software blinking */
+ return -EINVAL;
+ }
+
+ /* This is essentially the PWM duty cycle: how long of each period
+ * the LED will be on. Zero isn't great in most cases.
+ */
+ switch (delay_on) {
+ case 0:
+ /* This is usually pretty useless and will make the LED look OFF */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE;
+ break;
+ case 21:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS;
+ break;
+ case 42:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS;
+ break;
+ case 84:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS;
+ break;
+ case 168:
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS;
+ break;
+ default:
+ /* Just use something non-zero */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS;
+ break;
+ }
+
+ /* Set up blink rate */
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led_blink_set(struct mv88e6xxx_port *p, int led,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ u16 reg;
+ int err;
+
+ /* Choose a sensible default period of 336 ms (~3 Hz) */
+ if ((*delay_on == 0) && (*delay_off == 0)) {
+ *delay_on = 168;
+ *delay_off = 168;
+ }
+
+ /* No off delay is just on */
+ if (*delay_off == 0)
+ return mv88e6xxx_led_brightness_set(p, led, 1);
+
+ err = mv88e6xxx_led_set_blinking_period(p, led, *delay_on, *delay_off);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ /* This will select the forced blinking status */
+ if (led == 1)
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED1_SELD;
+ else
+ reg |= MV88E6XXX_PORT_LED_CONTROL_LED0_SELD;
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int mv88e6xxx_led0_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_blink_set(p, 0, delay_on, delay_off);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int mv88e6xxx_led1_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_blink_set(p, 1, delay_on, delay_off);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led0_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ u16 selector = 0;
+
+ return mv88e6xxx_led_get_selector(p, 0, p->fiber, rules, &selector);
+}
+
+static int
+mv88e6xxx_led1_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ u16 selector = 0;
+
+ return mv88e6xxx_led_get_selector(p, 1, p->fiber, rules, &selector);
+}
+
+static int mv88e6xxx_led_hw_control_set(struct mv88e6xxx_port *p,
+ int led, unsigned long rules)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL,
+ &reg);
+ if (err)
+ return err;
+
+ if (led == 1)
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ else
+ reg &= ~MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+
+ err = mv88e6xxx_led_get_selector(p, led, p->fiber, rules, &reg);
+ if (err)
+ return err;
+
+ reg |= MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL;
+
+ if (led == 0)
+ dev_dbg(p->chip->dev, "LED 0 hw control on port %d trigger selector 0x%02x\n",
+ p->port,
+ (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK));
+ else
+ dev_dbg(p->chip->dev, "LED 1 hw control on port %d trigger selector 0x%02x\n",
+ p->port,
+ (unsigned int)(reg & MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK) >> 4);
+
+ return mv88e6xxx_port_led_write(p->chip, p->port, reg);
+}
+
+static int
+mv88e6xxx_led_hw_control_get(struct mv88e6xxx_port *p, int led, unsigned long *rules)
+{
+ u16 val;
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_port_led_read(p->chip, p->port,
+ MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL, &val);
+ mv88e6xxx_reg_unlock(p->chip);
+ if (err)
+ return err;
+
+ /* Mask out the selector bits for this port */
+ if (led == 1) {
+ val &= MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK;
+ /* It's forced blinking/OFF/ON */
+ if (val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELD ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELE ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED1_SELF) {
+ *rules = 0;
+ return 0;
+ }
+ } else {
+ val &= MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK;
+ /* It's forced blinking/OFF/ON */
+ if (val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELD ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELE ||
+ val == MV88E6XXX_PORT_LED_CONTROL_LED0_SELF) {
+ *rules = 0;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_led_match_rule(p, val, led, rules);
+ if (!err)
+ return 0;
+
+ dev_dbg(p->chip->dev, "couldn't find matching selector for %04x\n", val);
+ *rules = 0;
+ return 0;
+}
+
+static int
+mv88e6xxx_led0_hw_control_set(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_hw_control_set(p, 0, rules);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led1_hw_control_set(struct led_classdev *ldev, unsigned long rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+ int err;
+
+ mv88e6xxx_reg_lock(p->chip);
+ err = mv88e6xxx_led_hw_control_set(p, 1, rules);
+ mv88e6xxx_reg_unlock(p->chip);
+
+ return err;
+}
+
+static int
+mv88e6xxx_led0_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+
+ return mv88e6xxx_led_hw_control_get(p, 0, rules);
+}
+
+static int
+mv88e6xxx_led1_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+
+ return mv88e6xxx_led_hw_control_get(p, 1, rules);
+}
+
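+/* The device associated with LED hardware offloading, as seen by the
+ * netdev trigger, is the DSA user netdevice of this port, when one is
+ * present.
+ */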
+static struct device *mv88e6xxx_led_hw_control_get_device(struct mv88e6xxx_port *p)
+{
+ struct dsa_port *dp;
+
+ dp = dsa_to_port(p->chip->ds, p->port);
+ if (!dp)
+ return NULL;
+ if (dp->user)
+ return &dp->user->dev;
+ return NULL;
+}
+
+static struct device *
+mv88e6xxx_led0_hw_control_get_device(struct led_classdev *ldev)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led0);
+
+ return mv88e6xxx_led_hw_control_get_device(p);
+}
+
+static struct device *
+mv88e6xxx_led1_hw_control_get_device(struct led_classdev *ldev)
+{
+ struct mv88e6xxx_port *p = container_of(ldev, struct mv88e6xxx_port, led1);
+
+ return mv88e6xxx_led_hw_control_get_device(p);
+}
+
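+/* Register up to two LED class devices per port, described by an
+ * optional "leds" container node under the port in the device tree.
+ * An illustrative fragment (only "reg" and "default-state" are parsed
+ * here; other properties follow the common LED bindings):
+ *
+ *	leds {
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *
+ *		led@0 {
+ *			reg = <0>;
+ *			default-state = "keep";
+ *		};
+ *	};
+ */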
+int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
+{
+ struct fwnode_handle *led = NULL, *leds = NULL;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct mv88e6xxx_port *p;
+ struct led_classdev *l;
+ struct device *dev;
+ u32 led_num;
+ int ret;
+
+ /* LEDs are only on ports 1-6 (index 0..5), no more */
+ if (port > 5)
+ return -EOPNOTSUPP;
+
+ p = &chip->ports[port];
+ if (!p->fwnode)
+ return 0;
+
+ dev = chip->dev;
+
+ leds = fwnode_get_named_child_node(p->fwnode, "leds");
+ if (!leds) {
+ dev_dbg(dev, "No Leds node specified in device tree for port %d!\n",
+ port);
+ return 0;
+ }
+
+ fwnode_for_each_child_node(leds, led) {
+ /* "reg" represents the LED number on the port; at most two
+ * LEDs can be connected to each port, and in some designs
+ * only one LED is connected.
+ */
+ if (fwnode_property_read_u32(led, "reg", &led_num))
+ continue;
+ if (led_num > 1) {
+ dev_err(dev, "invalid LED specified port %d\n", port);
+ ret = -EINVAL;
+ goto err_put_led;
+ }
+
+ if (led_num == 0)
+ l = &p->led0;
+ else
+ l = &p->led1;
+
+ state = led_init_default_state_get(led);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ l->brightness = 1;
+ mv88e6xxx_led_brightness_set(p, led_num, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ break;
+ default:
+ l->brightness = 0;
+ mv88e6xxx_led_brightness_set(p, led_num, 0);
+ }
+
+ l->max_brightness = 1;
+ if (led_num == 0) {
+ l->brightness_set_blocking = mv88e6xxx_led0_brightness_set_blocking;
+ l->blink_set = mv88e6xxx_led0_blink_set;
+ l->hw_control_is_supported = mv88e6xxx_led0_hw_control_is_supported;
+ l->hw_control_set = mv88e6xxx_led0_hw_control_set;
+ l->hw_control_get = mv88e6xxx_led0_hw_control_get;
+ l->hw_control_get_device = mv88e6xxx_led0_hw_control_get_device;
+ } else {
+ l->brightness_set_blocking = mv88e6xxx_led1_brightness_set_blocking;
+ l->blink_set = mv88e6xxx_led1_blink_set;
+ l->hw_control_is_supported = mv88e6xxx_led1_hw_control_is_supported;
+ l->hw_control_set = mv88e6xxx_led1_hw_control_set;
+ l->hw_control_get = mv88e6xxx_led1_hw_control_get;
+ l->hw_control_get_device = mv88e6xxx_led1_hw_control_get_device;
+ }
+ l->hw_control_trigger = "netdev";
+
+ init_data.default_label = ":port";
+ init_data.fwnode = led;
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d:0%d", chip->info->name,
+ port, led_num);
+ if (!init_data.devicename) {
+ ret = -ENOMEM;
+ goto err_put_led;
+ }
+
+ ret = devm_led_classdev_register_ext(dev, l, &init_data);
+ kfree(init_data.devicename);
+
+ if (ret) {
+ dev_err(dev, "Failed to init LED %d for port %d", led_num, port);
+ goto err_put_led;
+ }
+ }
+
+ fwnode_handle_put(leds);
+ return 0;
+
+err_put_led:
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
+ return ret;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
new file mode 100644
index 000000000000..af7e06d265f7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6185 family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+#include <linux/phylink.h>
+
+#include "global2.h"
+#include "port.h"
+#include "serdes.h"
+
+struct mv88e6185_pcs {
+ struct phylink_pcs phylink_pcs;
+ unsigned int irq;
+ char name[64];
+
+ struct mv88e6xxx_chip *chip;
+ int port;
+};
+
+static struct mv88e6185_pcs *pcs_to_mv88e6185_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e6185_pcs, phylink_pcs);
+}
+
+static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct mv88e6185_pcs *mpcs = dev_id;
+ struct mv88e6xxx_chip *chip;
+ irqreturn_t ret = IRQ_NONE;
+ bool link_up;
+ u16 status;
+ int port;
+ int err;
+
+ chip = mpcs->chip;
+ port = mpcs->port;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (!err) {
+ link_up = !!(status & MV88E6XXX_PORT_STS_LINK);
+
+ phylink_pcs_change(&mpcs->phylink_pcs, link_up);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
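+/* The 6185 family has no separately addressable PCS registers; the
+ * link state is instead derived from the MAC-side port status
+ * register.
+ */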
+static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
+ struct mv88e6xxx_chip *chip = mpcs->chip;
+ int port = mpcs->port;
+ u16 status;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ status = 0;
+
+ state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
+ if (state->link) {
+ state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
+ case MV88E6XXX_PORT_STS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+
+ case MV88E6XXX_PORT_STS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+
+ case MV88E6XXX_PORT_STS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+
+ default:
+ state->link = false;
+ break;
+ }
+ }
+}
+
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static void mv88e6185_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops mv88e6185_phylink_pcs_ops = {
+ .pcs_get_state = mv88e6185_pcs_get_state,
+ .pcs_config = mv88e6185_pcs_config,
+ .pcs_an_restart = mv88e6185_pcs_an_restart,
+};
+
+static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e6185_pcs *mpcs;
+ struct device *dev;
+ unsigned int irq;
+ int err;
+
+ /* There are no configurable serdes lanes on this switch chip, so
+ * we use the static cmode configuration to determine whether we
+ * have a PCS or not.
+ */
+ if (chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_SERDES &&
+ chip->ports[port].cmode != MV88E6185_PORT_STS_CMODE_1000BASE_X)
+ return 0;
+
+ dev = chip->dev;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->chip = chip;
+ mpcs->port = port;
+ mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (irq) {
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ err = request_threaded_irq(irq, NULL, mv88e6185_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+ if (err) {
+ kfree(mpcs);
+ return err;
+ }
+
+ mpcs->irq = irq;
+ } else {
+ mpcs->phylink_pcs.poll = true;
+ }
+
+ chip->ports[port].pcs_private = &mpcs->phylink_pcs;
+
+ return 0;
+}
+
+static void mv88e6185_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e6185_pcs *mpcs;
+
+ mpcs = chip->ports[port].pcs_private;
+ if (!mpcs)
+ return;
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct phylink_pcs *mv88e6185_pcs_select(struct mv88e6xxx_chip *chip,
+ int port,
+ phy_interface_t interface)
+{
+ return chip->ports[port].pcs_private;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops = {
+ .pcs_init = mv88e6185_pcs_init,
+ .pcs_teardown = mv88e6185_pcs_teardown,
+ .pcs_select = mv88e6185_pcs_select,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
new file mode 100644
index 000000000000..36993400837e
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6352 family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+#include <linux/phylink.h>
+
+#include "global2.h"
+#include "port.h"
+#include "serdes.h"
+
+/* Definitions from drivers/net/phy/marvell.c, which would be good to reuse. */
+#define MII_M1011_PHY_STATUS 17
+#define MII_M1011_IMASK 18
+#define MII_M1011_IMASK_LINK_CHANGE BIT(10)
+#define MII_M1011_IEVENT 19
+#define MII_M1011_IEVENT_LINK_CHANGE BIT(10)
+#define MII_MARVELL_PHY_PAGE 22
+#define MII_MARVELL_FIBER_PAGE 1
+
+struct marvell_c22_pcs {
+ struct mdio_device mdio;
+ struct phylink_pcs phylink_pcs;
+ unsigned int irq;
+ char name[64];
+ bool (*link_check)(struct marvell_c22_pcs *mpcs);
+ struct mv88e6xxx_port *port;
+};
+
+static struct marvell_c22_pcs *pcs_to_marvell_c22_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct marvell_c22_pcs, phylink_pcs);
+}
+
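+/* Select the fiber page of the Serdes PHY, returning the previously
+ * selected page. The MDIO bus lock is taken here and is only released
+ * by marvell_c22_pcs_restore_page(), which the caller must invoke even
+ * when this function fails.
+ */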
+static int marvell_c22_pcs_set_fiber_page(struct marvell_c22_pcs *mpcs)
+{
+ u16 page;
+ int err;
+
+ mutex_lock(&mpcs->mdio.bus->mdio_lock);
+
+ err = __mdiodev_read(&mpcs->mdio, MII_MARVELL_PHY_PAGE);
+ if (err < 0) {
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't read Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
+ return err;
+ }
+
+ page = err;
+
+ err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
+ MII_MARVELL_FIBER_PAGE);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't set Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
+ return err;
+ }
+
+ return page;
+}
+
+static int marvell_c22_pcs_restore_page(struct marvell_c22_pcs *mpcs,
+ int oldpage, int ret)
+{
+ int err;
+
+ if (oldpage >= 0) {
+ err = __mdiodev_write(&mpcs->mdio, MII_MARVELL_PHY_PAGE,
+ oldpage);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: can't restore Serdes page register: %pe\n",
+ mpcs->name, ERR_PTR(err));
+ if (!err || ret < 0)
+ err = ret;
+ } else {
+ err = oldpage;
+ }
+ mutex_unlock(&mpcs->mdio.bus->mdio_lock);
+
+ return err;
+}
+
+static irqreturn_t marvell_c22_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct marvell_c22_pcs *mpcs = dev_id;
+ irqreturn_t status = IRQ_NONE;
+ int err, oldpage;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage < 0)
+ goto fail;
+
+ err = __mdiodev_read(&mpcs->mdio, MII_M1011_IEVENT);
+ if (err >= 0 && err & MII_M1011_IEVENT_LINK_CHANGE) {
+ phylink_pcs_change(&mpcs->phylink_pcs, true);
+ status = IRQ_HANDLED;
+ }
+
+fail:
+ marvell_c22_pcs_restore_page(mpcs, oldpage, 0);
+
+ return status;
+}
+
+static int marvell_c22_pcs_modify(struct marvell_c22_pcs *mpcs, u8 reg,
+ u16 mask, u16 val)
+{
+ int oldpage, err = 0;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage >= 0)
+ err = __mdiodev_modify(&mpcs->mdio, reg, mask, val);
+
+ return marvell_c22_pcs_restore_page(mpcs, oldpage, err);
+}
+
+static int marvell_c22_pcs_power(struct marvell_c22_pcs *mpcs,
+ bool on)
+{
+ u16 val = on ? 0 : BMCR_PDOWN;
+
+ return marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_PDOWN, val);
+}
+
+static int marvell_c22_pcs_control_irq(struct marvell_c22_pcs *mpcs,
+ bool enable)
+{
+ u16 val = enable ? MII_M1011_IMASK_LINK_CHANGE : 0;
+
+ return marvell_c22_pcs_modify(mpcs, MII_M1011_IMASK,
+ MII_M1011_IMASK_LINK_CHANGE, val);
+}
+
+static int marvell_c22_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int err;
+
+ err = marvell_c22_pcs_power(mpcs, true);
+ if (err)
+ return err;
+
+ return marvell_c22_pcs_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ marvell_c22_pcs_control_irq(mpcs, false);
+ marvell_c22_pcs_power(mpcs, false);
+}
+
+static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int oldpage, bmsr, lpa, status;
+
+ state->link = false;
+
+ if (mpcs->link_check && !mpcs->link_check(mpcs))
+ return;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage >= 0) {
+ bmsr = __mdiodev_read(&mpcs->mdio, MII_BMSR);
+ lpa = __mdiodev_read(&mpcs->mdio, MII_LPA);
+ status = __mdiodev_read(&mpcs->mdio, MII_M1011_PHY_STATUS);
+ }
+
+ if (marvell_c22_pcs_restore_page(mpcs, oldpage, 0) >= 0 &&
+ bmsr >= 0 && lpa >= 0 && status >= 0)
+ mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa,
+ status, state);
+}
+
+static int marvell_c22_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ int oldpage, adv, err, ret = 0;
+ u16 bmcr;
+
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv < 0)
+ return 0;
+
+ bmcr = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ? BMCR_ANENABLE : 0;
+
+ oldpage = marvell_c22_pcs_set_fiber_page(mpcs);
+ if (oldpage < 0)
+ goto restore;
+
+ err = __mdiodev_modify_changed(&mpcs->mdio, MII_ADVERTISE, 0xffff, adv);
+ ret = err;
+ if (err < 0)
+ goto restore;
+
+ err = __mdiodev_modify_changed(&mpcs->mdio, MII_BMCR, BMCR_ANENABLE,
+ bmcr);
+ if (err < 0) {
+ ret = err;
+ goto restore;
+ }
+
+ /* If the ANENABLE bit was changed, the PHY will restart negotiation,
+ * so we don't need to flag a change to trigger its own restart.
+ */
+ if (err)
+ ret = 0;
+
+restore:
+ return marvell_c22_pcs_restore_page(mpcs, oldpage, ret);
+}
+
+static void marvell_c22_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_ANRESTART, BMCR_ANRESTART);
+}
+
+static void marvell_c22_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
+ u16 bmcr;
+ int err;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ bmcr = mii_bmcr_encode_fixed(speed, duplex);
+
+ err = marvell_c22_pcs_modify(mpcs, MII_BMCR, BMCR_SPEED100 |
+ BMCR_FULLDPLX | BMCR_SPEED1000, bmcr);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "%s: failed to configure mpcs: %pe\n", mpcs->name,
+ ERR_PTR(err));
+}
+
+static const struct phylink_pcs_ops marvell_c22_pcs_ops = {
+ .pcs_enable = marvell_c22_pcs_enable,
+ .pcs_disable = marvell_c22_pcs_disable,
+ .pcs_get_state = marvell_c22_pcs_get_state,
+ .pcs_config = marvell_c22_pcs_config,
+ .pcs_an_restart = marvell_c22_pcs_an_restart,
+ .pcs_link_up = marvell_c22_pcs_link_up,
+};
+
+static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
+ struct mii_bus *bus,
+ unsigned int addr)
+{
+ struct marvell_c22_pcs *mpcs;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return NULL;
+
+ mpcs->mdio.dev.parent = dev;
+ mpcs->mdio.bus = bus;
+ mpcs->mdio.addr = addr;
+ mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
+
+ return mpcs;
+}
+
+static int marvell_c22_pcs_setup_irq(struct marvell_c22_pcs *mpcs,
+ unsigned int irq)
+{
+ int err;
+
+ mpcs->phylink_pcs.poll = !irq;
+ mpcs->irq = irq;
+
+ if (irq) {
+ err = request_threaded_irq(irq, NULL,
+ marvell_c22_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* mv88e6352 specifics */
+
+static bool mv88e6352_pcs_link_check(struct marvell_c22_pcs *mpcs)
+{
+ struct mv88e6xxx_port *port = mpcs->port;
+ struct mv88e6xxx_chip *chip = port->chip;
+ u8 cmode;
+
+ /* Port 4 can be in auto-media mode. Check that the port is
+ * associated with the mpcs.
+ */
+ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->port_get_cmode(chip, port->port, &cmode);
+ mv88e6xxx_reg_unlock(chip);
+
+ return cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII;
+}
+
+static int mv88e6352_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct marvell_c22_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ unsigned int irq;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = marvell_c22_pcs_alloc(dev, bus, MV88E6352_ADDR_SERDES);
+ if (!mpcs)
+ return -ENOMEM;
+
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ mpcs->link_check = mv88e6352_pcs_link_check;
+ mpcs->port = &chip->ports[port];
+
+ err = marvell_c22_pcs_setup_irq(mpcs, irq);
+ if (err) {
+ kfree(mpcs);
+ return err;
+ }
+
+ chip->ports[port].pcs_private = &mpcs->phylink_pcs;
+
+ return 0;
+}
+
+static void mv88e6352_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct marvell_c22_pcs *mpcs;
+ struct phylink_pcs *pcs;
+
+ pcs = chip->ports[port].pcs_private;
+ if (!pcs)
+ return;
+
+ mpcs = pcs_to_marvell_c22_pcs(pcs);
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct phylink_pcs *mv88e6352_pcs_select(struct mv88e6xxx_chip *chip,
+ int port,
+ phy_interface_t interface)
+{
+ return chip->ports[port].pcs_private;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops = {
+ .pcs_init = mv88e6352_pcs_init,
+ .pcs_teardown = mv88e6352_pcs_teardown,
+ .pcs_select = mv88e6352_pcs_select,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
new file mode 100644
index 000000000000..5db17c0b77f5
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -0,0 +1,970 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6390/88E6393X family SERDES PCS support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ */
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mii.h>
+#include <linux/string_choices.h>
+
+#include "chip.h"
+#include "global2.h"
+#include "phy.h"
+#include "port.h"
+#include "serdes.h"
+
+struct mv88e639x_pcs {
+ struct mdio_device mdio;
+ struct phylink_pcs sgmii_pcs;
+ struct phylink_pcs xg_pcs;
+ bool erratum_3_14;
+ bool supports_5g;
+ phy_interface_t interface;
+ unsigned int irq;
+ char name[64];
+ irqreturn_t (*handle_irq)(struct mv88e639x_pcs *mpcs);
+};
+
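+/* Low-level accessors for the Serdes lane; all registers live in the
+ * PHYXS MMD and are reached via clause 45 addressing.
+ */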
+static int mv88e639x_read(struct mv88e639x_pcs *mpcs, u16 regnum, u16 *val)
+{
+ int err;
+
+ err = mdiodev_c45_read(&mpcs->mdio, MDIO_MMD_PHYXS, regnum);
+ if (err < 0)
+ return err;
+
+ *val = err;
+
+ return 0;
+}
+
+static int mv88e639x_write(struct mv88e639x_pcs *mpcs, u16 regnum, u16 val)
+{
+ return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, val);
+}
+
+static int mv88e639x_modify(struct mv88e639x_pcs *mpcs, u16 regnum, u16 mask,
+ u16 val)
+{
+ return mdiodev_c45_modify(&mpcs->mdio, MDIO_MMD_PHYXS, regnum, mask,
+ val);
+}
+
+static int mv88e639x_modify_changed(struct mv88e639x_pcs *mpcs, u16 regnum,
+ u16 mask, u16 set)
+{
+ return mdiodev_c45_modify_changed(&mpcs->mdio, MDIO_MMD_PHYXS, regnum,
+ mask, set);
+}
+
+static struct mv88e639x_pcs *
+mv88e639x_pcs_alloc(struct device *dev, struct mii_bus *bus, unsigned int addr,
+ int port)
+{
+ struct mv88e639x_pcs *mpcs;
+
+ mpcs = kzalloc(sizeof(*mpcs), GFP_KERNEL);
+ if (!mpcs)
+ return NULL;
+
+ mpcs->mdio.dev.parent = dev;
+ mpcs->mdio.bus = bus;
+ mpcs->mdio.addr = addr;
+
+ snprintf(mpcs->name, sizeof(mpcs->name),
+ "mv88e6xxx-%s-serdes-%d", dev_name(dev), port);
+
+ return mpcs;
+}
+
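+/* Demultiplex the Serdes interrupt to whichever PCS (SGMII or 10G) is
+ * currently enabled; handle_irq is set by the pcs_enable methods.
+ */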
+static irqreturn_t mv88e639x_pcs_handle_irq(int irq, void *dev_id)
+{
+ struct mv88e639x_pcs *mpcs = dev_id;
+ irqreturn_t (*handler)(struct mv88e639x_pcs *);
+
+ handler = READ_ONCE(mpcs->handle_irq);
+ if (!handler)
+ return IRQ_NONE;
+
+ return handler(mpcs);
+}
+
+static int mv88e639x_pcs_setup_irq(struct mv88e639x_pcs *mpcs,
+ struct mv88e6xxx_chip *chip, int port)
+{
+ unsigned int irq;
+
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (!irq) {
+ /* Use polling mode */
+ mpcs->sgmii_pcs.poll = true;
+ mpcs->xg_pcs.poll = true;
+ return 0;
+ }
+
+ mpcs->irq = irq;
+
+ return request_threaded_irq(irq, NULL, mv88e639x_pcs_handle_irq,
+ IRQF_ONESHOT, mpcs->name, mpcs);
+}
+
+static void mv88e639x_pcs_teardown(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs = chip->ports[port].pcs_private;
+
+ if (!mpcs)
+ return;
+
+ if (mpcs->irq)
+ free_irq(mpcs->irq, mpcs);
+
+ kfree(mpcs);
+
+ chip->ports[port].pcs_private = NULL;
+}
+
+static struct mv88e639x_pcs *sgmii_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e639x_pcs, sgmii_pcs);
+}
+
+static irqreturn_t mv88e639x_sgmii_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & (MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP)) {
+ phylink_pcs_change(&mpcs->sgmii_pcs,
+ int_status & MV88E6390_SGMII_INT_LINK_UP);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e639x_sgmii_pcs_control_irq(struct mv88e639x_pcs *mpcs,
+ bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val |= MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP;
+
+ return mv88e639x_modify(mpcs, MV88E6390_SGMII_INT_ENABLE,
+ MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP, val);
+}
+
+static int mv88e639x_sgmii_pcs_control_pwr(struct mv88e639x_pcs *mpcs,
+ bool enable)
+{
+ u16 mask, val;
+
+ if (enable) {
+ mask = BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN;
+ val = 0;
+ } else {
+ mask = val = BMCR_PDOWN;
+ }
+
+ return mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR, mask, val);
+}
+
+static int mv88e639x_sgmii_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ /* power enable done in post_config */
+ mpcs->handle_irq = mv88e639x_sgmii_handle_irq;
+
+ return mv88e639x_sgmii_pcs_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e639x_sgmii_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_control_irq(mpcs, false);
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+}
+
+static void mv88e639x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+}
+
+static int mv88e6390_erratum_3_14(struct mv88e639x_pcs *mpcs)
+{
+ static const int lanes[] = { MV88E6390_PORT9_LANE0, MV88E6390_PORT9_LANE1,
+ MV88E6390_PORT9_LANE2, MV88E6390_PORT9_LANE3,
+ MV88E6390_PORT10_LANE0, MV88E6390_PORT10_LANE1,
+ MV88E6390_PORT10_LANE2, MV88E6390_PORT10_LANE3 };
+ int err, i;
+
+ /* 88e6190x and 88e6390x errata 3.14:
+ * After chip reset, SERDES reconfiguration or SERDES core
+ * Software Reset, the SERDES lanes may not be properly aligned
+ * resulting in CRC errors
+ */
+
+ for (i = 0; i < ARRAY_SIZE(lanes); i++) {
+ err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i],
+ MDIO_MMD_PHYXS,
+ 0xf054, 0x400C);
+ if (err)
+ return err;
+
+ err = mdiobus_c45_write(mpcs->mdio.bus, lanes[i],
+ MDIO_MMD_PHYXS,
+ 0xf054, 0x4000);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ mv88e639x_sgmii_pcs_control_pwr(mpcs, true);
+
+ if (mpcs->erratum_3_14) {
+ err = mv88e6390_erratum_3_14(mpcs);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "failed to apply erratum 3.14: %pe\n",
+ ERR_PTR(err));
+ }
+
+ return 0;
+}
+
+static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 bmsr, lpa, status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "BMSR", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_LPA, &lpa);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "LPA", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_PHY_STATUS, &status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "status", ERR_PTR(err));
+ state->link = false;
+ return;
+ }
+
+ mv88e6xxx_pcs_decode_state(mpcs->mdio.dev.parent, bmsr, lpa, status,
+ state);
+}
+
+static int mv88e639x_sgmii_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 val, bmcr;
+ bool changed;
+ int adv, err;
+
+ adv = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
+ if (adv < 0)
+ return 0;
+
+ mpcs->interface = interface;
+
+ err = mv88e639x_modify_changed(mpcs, MV88E6390_SGMII_ADVERTISE,
+ 0xffff, adv);
+ if (err < 0)
+ return err;
+
+ changed = err > 0;
+
+ err = mv88e639x_read(mpcs, MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ /* setting ANENABLE triggers a restart of negotiation */
+ if (bmcr == val)
+ return changed;
+
+ return mv88e639x_write(mpcs, MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static void mv88e639x_sgmii_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_ANRESTART, BMCR_ANRESTART);
+}
+
+static void mv88e639x_sgmii_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ u16 bmcr;
+ int err;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ bmcr = mii_bmcr_encode_fixed(speed, duplex);
+
+ err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX,
+ bmcr);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "can't access Serdes PHY %s: %pe\n",
+ "BMCR", ERR_PTR(err));
+}
+
+static const struct phylink_pcs_ops mv88e639x_sgmii_pcs_ops = {
+ .pcs_enable = mv88e639x_sgmii_pcs_enable,
+ .pcs_disable = mv88e639x_sgmii_pcs_disable,
+ .pcs_pre_config = mv88e639x_sgmii_pcs_pre_config,
+ .pcs_post_config = mv88e639x_sgmii_pcs_post_config,
+ .pcs_get_state = mv88e639x_sgmii_pcs_get_state,
+ .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
+ .pcs_config = mv88e639x_sgmii_pcs_config,
+ .pcs_link_up = mv88e639x_sgmii_pcs_link_up,
+};
+
+static struct mv88e639x_pcs *xg_pcs_to_mv88e639x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mv88e639x_pcs, xg_pcs);
+}
+
+static int mv88e639x_xg_pcs_enable(struct mv88e639x_pcs *mpcs)
+{
+ return mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1,
+ MDIO_CTRL1_RESET | MDIO_PCS_CTRL1_LOOPBACK |
+ MDIO_CTRL1_LPOWER, 0);
+}
+
+static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
+{
+ mv88e639x_modify(mpcs, MV88E6390_10G_CTRL1, MDIO_CTRL1_LPOWER,
+ MDIO_CTRL1_LPOWER);
+}
+
+static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ u16 status;
+ int err;
+
+ state->link = false;
+
+ err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read Serdes PHY %s: %pe\n",
+ "STAT1", ERR_PTR(err));
+ return;
+ }
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ if (state->link) {
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
+ state->speed = SPEED_5000;
+ break;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ state->speed = SPEED_10000;
+ break;
+
+ default:
+ state->link = false;
+ return;
+ }
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+static int mv88e639x_xg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
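+/* Route phylink to the SGMII or 10G PCS instance according to the
+ * requested interface mode; modes the lane cannot handle return NULL.
+ */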
+static struct phylink_pcs *
+mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ struct mv88e639x_pcs *mpcs;
+
+ mpcs = chip->ports[port].pcs_private;
+ if (!mpcs)
+ return NULL;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &mpcs->sgmii_pcs;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ if (!mpcs->supports_5g)
+ return NULL;
+ fallthrough;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return &mpcs->xg_pcs;
+
+ default:
+ return NULL;
+ }
+}
+
+/* Marvell 88E6390 Specific support */
+
+static irqreturn_t mv88e6390_xg_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6390_10G_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & (MV88E6390_10G_INT_LINK_DOWN |
+ MV88E6390_10G_INT_LINK_UP)) {
+ phylink_pcs_change(&mpcs->xg_pcs,
+ int_status & MV88E6390_10G_INT_LINK_UP);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e6390_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = MV88E6390_10G_INT_LINK_DOWN | MV88E6390_10G_INT_LINK_UP;
+
+ return mv88e639x_modify(mpcs, MV88E6390_10G_INT_ENABLE,
+ MV88E6390_10G_INT_LINK_DOWN |
+ MV88E6390_10G_INT_LINK_UP, val);
+}
+
+static int mv88e6390_xg_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ err = mv88e639x_xg_pcs_enable(mpcs);
+ if (err)
+ return err;
+
+ mpcs->handle_irq = mv88e6390_xg_handle_irq;
+
+ return mv88e6390_xg_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e6390_xg_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e6390_xg_control_irq(mpcs, false);
+ mv88e639x_xg_pcs_disable(mpcs);
+}
+
+static const struct phylink_pcs_ops mv88e6390_xg_pcs_ops = {
+ .pcs_enable = mv88e6390_xg_pcs_enable,
+ .pcs_disable = mv88e6390_xg_pcs_disable,
+ .pcs_get_state = mv88e639x_xg_pcs_get_state,
+ .pcs_config = mv88e639x_xg_pcs_config,
+};
+
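+/* Enable the packet generator/checker block on the lane via the PG
+ * control register.
+ */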
+static int mv88e6390_pcs_enable_checker(struct mv88e639x_pcs *mpcs)
+{
+ return mv88e639x_modify(mpcs, MV88E6390_PG_CONTROL,
+ MV88E6390_PG_CONTROL_ENABLE_PC,
+ MV88E6390_PG_CONTROL_ENABLE_PC);
+}
+
+static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ int lane, err;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
+ mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
+
+ if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
+ chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
+ mpcs->erratum_3_14 = true;
+
+ err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
+ if (err)
+ goto err_free;
+
+ /* The 6390 and 6390x have the checker; the 6393x doesn't appear
+ * to. This is needed to enable gathering the statistics. Maybe
+ * this should call out to a helper, or be done at init time.
+ */
+ err = mv88e6390_pcs_enable_checker(mpcs);
+ if (err)
+ goto err_free;
+
+ chip->ports[port].pcs_private = mpcs;
+
+ return 0;
+
+err_free:
+ kfree(mpcs);
+ return err;
+}
+
+const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops = {
+ .pcs_init = mv88e6390_pcs_init,
+ .pcs_teardown = mv88e639x_pcs_teardown,
+ .pcs_select = mv88e639x_pcs_select,
+};
+
+/* Marvell 88E6393X Specific support */
+
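+/* Power the lane's transmitter and receiver up or down */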
+static int mv88e6393x_power_lane(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_SERDES_CTRL1, val,
+ enable ? 0 : val);
+}
+
+/* mv88e6393x family errata 4.6:
+ * Cannot clear PwrDn bit on SERDES if the device is configured in CPU_MGD
+ * mode or P0_mode is configured for [x]MII.
+ * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
+ *
+ * It seems that after this workaround the SERDES is automatically powered up
+ * (the bit is cleared), so power it down.
+ */
+static int mv88e6393x_erratum_4_6(struct mv88e639x_pcs *mpcs)
+{
+ int err;
+
+ err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_PDOWN |
+ MV88E6393X_SERDES_POC_RESET,
+ MV88E6393X_SERDES_POC_RESET);
+ if (err)
+ return err;
+
+ err = mv88e639x_modify(mpcs, MV88E6390_SGMII_BMCR,
+ BMCR_PDOWN, BMCR_PDOWN);
+ if (err)
+ return err;
+
+ err = mv88e639x_sgmii_pcs_control_pwr(mpcs, false);
+ if (err)
+ return err;
+
+ return mv88e6393x_power_lane(mpcs, false);
+}
+
+/* mv88e6393x family errata 4.8:
+ * When a SERDES port is operating in 1000BASE-X or SGMII mode link may not
+ * come up after hardware reset or software reset of the SERDES core. The
+ * workaround is to write SERDES register 4.F074 bit 14=1 only for those
+ * modes, and 0 in all other modes.
+ */
+static int mv88e6393x_erratum_4_8(struct mv88e639x_pcs *mpcs)
+{
+ u16 reg, poc;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6393X_SERDES_POC, &poc);
+ if (err)
+ return err;
+
+ poc &= MV88E6393X_SERDES_POC_PCS_MASK;
+ if (poc == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
+ poc == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
+ poc == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
+ reg = MV88E6393X_ERRATA_4_8_BIT;
+ else
+ reg = 0;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_ERRATA_4_8_REG,
+ MV88E6393X_ERRATA_4_8_BIT, reg);
+}
+
+/* mv88e6393x family errata 5.2:
+ * For optimal signal integrity the following sequence should be applied to
+ * SERDES operating in 10G mode. These registers only apply to 10G operation
+ * and have no effect on other speeds.
+ */
+static int mv88e6393x_erratum_5_2(struct mv88e639x_pcs *mpcs)
+{
+ static const struct {
+ u16 dev, reg, val, mask;
+ } fixes[] = {
+ { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+ { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+ { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+ { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+ { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+ { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+ { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+ };
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+ err = mdiodev_c45_modify(&mpcs->mdio, fixes[i].dev,
+ fixes[i].reg, fixes[i].mask,
+ fixes[i].val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Inband AN is broken on Amethyst in 2500base-x mode when set by standard
+ * mechanism (via cmode).
+ * We can get around this by configuring the PCS mode to 1000base-x and then
+ * writing value 0x58 to register 1e.8000. (This must be done while SerDes
+ * receiver and transmitter are disabled, which is the case when this
+ * function is called.)
+ * It seems that when we do this configuration to 2500base-x mode (by changing
+ * PCS mode to 1000base-x and frequency to 3.125 GHz from 1.25 GHz) and then
+ * configure to sgmii or 1000base-x, the device thinks that it already has
+ * SerDes at 1.25 GHz and does not change the 1e.8000 register, leaving SerDes
+ * at 3.125 GHz.
+ * To avoid this, change PCS mode back to 2500base-x when disabling SerDes from
+ * 2500base-x mode.
+ */
+static int mv88e6393x_fix_2500basex_an(struct mv88e639x_pcs *mpcs, bool on)
+{
+ u16 reg;
+ int err;
+
+ if (on)
+ reg = MV88E6393X_SERDES_POC_PCS_1000BASEX |
+ MV88E6393X_SERDES_POC_AN;
+ else
+ reg = MV88E6393X_SERDES_POC_PCS_2500BASEX;
+
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e639x_modify(mpcs, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_PCS_MASK |
+ MV88E6393X_SERDES_POC_AN |
+ MV88E6393X_SERDES_POC_RESET, reg);
+ if (err)
+ return err;
+
+ return mdiodev_c45_write(&mpcs->mdio, MDIO_MMD_VEND1, 0x8000, 0x58);
+}
+
+static int mv88e6393x_sgmii_apply_2500basex_an(struct mv88e639x_pcs *mpcs,
+ phy_interface_t interface,
+ bool enable)
+{
+ int err;
+
+ if (interface != PHY_INTERFACE_MODE_2500BASEX)
+ return 0;
+
+ err = mv88e6393x_fix_2500basex_an(mpcs, enable);
+ if (err)
+ dev_err(mpcs->mdio.dev.parent,
+ "failed to %s 2500basex fix: %pe\n",
+ str_enable_disable(enable), ERR_PTR(err));
+
+ return err;
+}
+
+static void mv88e6393x_sgmii_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_disable(pcs);
+ mv88e6393x_power_lane(mpcs, false);
+ mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
+}
+
+static void mv88e6393x_sgmii_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_sgmii_pcs_pre_config(pcs, interface);
+ mv88e6393x_power_lane(mpcs, false);
+ mv88e6393x_sgmii_apply_2500basex_an(mpcs, mpcs->interface, false);
+}
+
+static int mv88e6393x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ err = mv88e6393x_erratum_4_8(mpcs);
+ if (err)
+ return err;
+
+ err = mv88e6393x_sgmii_apply_2500basex_an(mpcs, interface, true);
+ if (err)
+ return err;
+
+ err = mv88e6393x_power_lane(mpcs, true);
+ if (err)
+ return err;
+
+ return mv88e639x_sgmii_pcs_post_config(pcs, interface);
+}
+
+static const struct phylink_pcs_ops mv88e6393x_sgmii_pcs_ops = {
+ .pcs_enable = mv88e639x_sgmii_pcs_enable,
+ .pcs_disable = mv88e6393x_sgmii_pcs_disable,
+ .pcs_pre_config = mv88e6393x_sgmii_pcs_pre_config,
+ .pcs_post_config = mv88e6393x_sgmii_pcs_post_config,
+ .pcs_get_state = mv88e639x_sgmii_pcs_get_state,
+ .pcs_an_restart = mv88e639x_sgmii_pcs_an_restart,
+ .pcs_config = mv88e639x_sgmii_pcs_config,
+ .pcs_link_up = mv88e639x_sgmii_pcs_link_up,
+};
+
+static irqreturn_t mv88e6393x_xg_handle_irq(struct mv88e639x_pcs *mpcs)
+{
+ u16 int_status, stat1;
+ bool link_down;
+ int err;
+
+ err = mv88e639x_read(mpcs, MV88E6393X_10G_INT_STATUS, &int_status);
+ if (err)
+ return IRQ_NONE;
+
+ if (int_status & MV88E6393X_10G_INT_LINK_CHANGE) {
+ err = mv88e639x_read(mpcs, MV88E6390_10G_STAT1, &stat1);
+ if (err)
+ return IRQ_NONE;
+
+ link_down = !(stat1 & MDIO_STAT1_LSTATUS);
+
+ phylink_pcs_change(&mpcs->xg_pcs, !link_down);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mv88e6393x_xg_control_irq(struct mv88e639x_pcs *mpcs, bool enable)
+{
+ u16 val = 0;
+
+ if (enable)
+ val = MV88E6393X_10G_INT_LINK_CHANGE;
+
+ return mv88e639x_modify(mpcs, MV88E6393X_10G_INT_ENABLE,
+ MV88E6393X_10G_INT_LINK_CHANGE, val);
+}
+
+static int mv88e6393x_xg_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mpcs->handle_irq = mv88e6393x_xg_handle_irq;
+
+ return mv88e6393x_xg_control_irq(mpcs, !!mpcs->irq);
+}
+
+static void mv88e6393x_xg_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e6393x_xg_control_irq(mpcs, false);
+ mv88e639x_xg_pcs_disable(mpcs);
+ mv88e6393x_power_lane(mpcs, false);
+}
+
+/* The PCS has to be powered down while CMODE is changed */
+static void mv88e6393x_xg_pcs_pre_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+
+ mv88e639x_xg_pcs_disable(mpcs);
+ mv88e6393x_power_lane(mpcs, false);
+}
+
+static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ int err;
+
+ if (interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_USXGMII) {
+ err = mv88e6393x_erratum_5_2(mpcs);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6393x_power_lane(mpcs, true);
+ if (err)
+ return err;
+
+ return mv88e639x_xg_pcs_enable(mpcs);
+}
+
+static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ u16 status, lp_status;
+ int err;
+
+ if (state->interface != PHY_INTERFACE_MODE_USXGMII)
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
+
+ state->link = false;
+
+ err = mv88e639x_read(mpcs, MV88E6390_USXGMII_PHY_STATUS, &status);
+ err = err ? : mv88e639x_read(mpcs, MV88E6390_USXGMII_LP_STATUS, &lp_status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read USXGMII status: %pe\n", ERR_PTR(err));
+ return;
+ }
+
+ state->link = !!(status & MDIO_USXGMII_LINK);
+ state->an_complete = state->link;
+ phylink_decode_usxgmii_word(state, lp_status);
+}
+
+static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
+ .pcs_enable = mv88e6393x_xg_pcs_enable,
+ .pcs_disable = mv88e6393x_xg_pcs_disable,
+ .pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
+ .pcs_post_config = mv88e6393x_xg_pcs_post_config,
+ .pcs_get_state = mv88e6393x_xg_pcs_get_state,
+ .pcs_config = mv88e639x_xg_pcs_config,
+};
+
+static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e639x_pcs *mpcs;
+ struct mii_bus *bus;
+ struct device *dev;
+ int lane, err;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return 0;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ dev = chip->dev;
+
+ mpcs = mv88e639x_pcs_alloc(dev, bus, lane, port);
+ if (!mpcs)
+ return -ENOMEM;
+
+ mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
+ mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
+ mpcs->supports_5g = true;
+
+ err = mv88e6393x_erratum_4_6(mpcs);
+ if (err)
+ goto err_free;
+
+ err = mv88e639x_pcs_setup_irq(mpcs, chip, port);
+ if (err)
+ goto err_free;
+
+ chip->ports[port].pcs_private = mpcs;
+
+ return 0;
+
+err_free:
+ kfree(mpcs);
+ return err;
+}
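
The init path above funnels every post-allocation failure through a single err_free label; a condensed sketch of that unwind shape, with hypothetical setup stubs standing in for the erratum workaround and IRQ setup:

#include <stdlib.h>

struct pcs { int lane; };

static int setup_erratum(struct pcs *p) { (void)p; return 0; }	/* stub */
static int setup_irq(struct pcs *p)     { (void)p; return 0; }	/* stub */

static int pcs_init(struct pcs **out)
{
	struct pcs *p = calloc(1, sizeof(*p));
	int err;

	if (!p)
		return -12;	/* -ENOMEM */

	err = setup_erratum(p);
	if (err)
		goto err_free;

	err = setup_irq(p);
	if (err)
		goto err_free;

	*out = p;
	return 0;

err_free:
	free(p);
	return err;
}

int main(void)
{
	struct pcs *p;

	return pcs_init(&p);
}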
+
+const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops = {
+ .pcs_init = mv88e6393x_pcs_init,
+ .pcs_teardown = mv88e639x_pcs_teardown,
+ .pcs_select = mv88e639x_pcs_select,
+};
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 252b5b3a3efe..4e7827ee684a 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -55,6 +55,38 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val);
+}
+
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val);
+}
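
These helpers replace the older convention, removed from serdes.c further down, of packing a clause-45 access into one register argument as MII_ADDR_C45 | devad << 16 | reg. A pack/unpack sketch of that legacy encoding (MII_ADDR_C45 value as in older kernels):

#include <stdio.h>

#define MII_ADDR_C45		(1 << 30)
#define C45_PACK(devad, reg)	(MII_ADDR_C45 | ((devad) << 16) | (reg))

int main(void)
{
	int packed = C45_PACK(4, 0x1001);	/* e.g. MDIO_MMD_PHYXS, reg 0x1001 */

	printf("packed=0x%08x devad=%d reg=0x%04x\n",
	       packed, (packed >> 16) & 0x1f, packed & 0xffff);
	return 0;
}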
+
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
@@ -150,7 +182,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
+ struct mv88e6xxx_chip *chip = timer_container_of(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -174,7 +206,7 @@ static int mv88e6xxx_phy_ppu_access_get(struct mv88e6xxx_chip *chip)
}
chip->ppu_disabled = 1;
} else {
- del_timer(&chip->ppu_timer);
+ timer_delete(&chip->ppu_timer);
ret = 0;
}
@@ -197,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
{
- del_timer_sync(&chip->ppu_timer);
+ mutex_lock(&chip->ppu_mutex);
+ timer_delete_sync(&chip->ppu_timer);
+ cancel_work_sync(&chip->ppu_work);
+ mutex_unlock(&chip->ppu_mutex);
}
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
index 05ea0d546969..5f47722364cc 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.h
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -28,6 +28,10 @@ int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val);
int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val);
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val);
int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 *val);
int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index d9817b20ea64..66b1b7277281 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -12,6 +12,8 @@
#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/property.h>
+#include <linux/string_choices.h>
#include "chip.h"
#include "global2.h"
@@ -133,6 +135,15 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 2 && port != 5 && port != 6)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
{
u16 reg;
@@ -166,7 +177,7 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
dev_dbg(chip->dev, "p%d: %s link %s\n", port,
reg & MV88E6XXX_PORT_MAC_CTL_FORCE_LINK ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP ? "up" : "down");
+ str_up_down(reg & MV88E6XXX_PORT_MAC_CTL_LINK_UP));
return 0;
}
@@ -283,7 +294,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -294,28 +305,10 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex)
-{
- if (speed == SPEED_MAX)
- speed = 200;
-
- if (speed > 200)
- return -EOPNOTSUPP;
-
- /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
- duplex);
-}
-
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
@@ -327,9 +320,6 @@ int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 100;
-
if (speed > 100)
return -EOPNOTSUPP;
@@ -341,9 +331,6 @@ int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 5 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -357,7 +344,8 @@ int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
duplex);
}
-phy_interface_t mv88e6341_port_max_speed_mode(int port)
+phy_interface_t mv88e6341_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port)
{
if (port == 5)
return PHY_INTERFACE_MODE_2500BASEX;
@@ -369,9 +357,6 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed > 1000)
return -EOPNOTSUPP;
@@ -386,9 +371,6 @@ int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -402,7 +384,8 @@ int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
duplex);
}
-phy_interface_t mv88e6390_port_max_speed_mode(int port)
+phy_interface_t mv88e6390_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_2500BASEX;
@@ -414,9 +397,6 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -427,7 +407,8 @@ int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
duplex);
}
-phy_interface_t mv88e6390x_port_max_speed_mode(int port)
+phy_interface_t mv88e6390x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port)
{
if (port == 9 || port == 10)
return PHY_INTERFACE_MODE_XAUI;
@@ -445,8 +426,9 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
u16 reg, ctrl;
int err;
- if (speed == SPEED_MAX)
- speed = (port > 0 && port < 9) ? 1000 : 10000;
+ if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361 &&
+ speed > 2500)
+ return -EOPNOTSUPP;
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -516,7 +498,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -527,19 +509,23 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-phy_interface_t mv88e6393x_port_max_speed_mode(int port)
+phy_interface_t mv88e6393x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port)
{
- if (port == 0 || port == 9 || port == 10)
- return PHY_INTERFACE_MODE_10GBASER;
- return PHY_INTERFACE_MODE_NA;
+ if (port != 0 && port != 9 && port != 10)
+ return PHY_INTERFACE_MODE_NA;
+
+ if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6361)
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ return PHY_INTERFACE_MODE_10GBASER;
}
static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode, bool force)
{
u16 cmode;
- int lane;
u16 reg;
int err;
@@ -550,6 +536,15 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -572,6 +567,9 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_10GBASER:
cmode = MV88E6393X_PORT_STS_CMODE_10GBASER;
break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ cmode = MV88E6393X_PORT_STS_CMODE_USXGMII;
+ break;
default:
cmode = 0;
}
@@ -580,19 +578,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (cmode == chip->ports[port].cmode && !force)
return 0;
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane >= 0) {
- if (chip->ports[port].serdes_irq) {
- err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
- if (err)
- return err;
- }
-
- err = mv88e6xxx_serdes_power_down(chip, port, lane);
- if (err)
- return err;
- }
-
chip->ports[port].cmode = 0;
if (cmode) {
@@ -608,20 +593,6 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return err;
chip->ports[port].cmode = cmode;
-
- lane = mv88e6xxx_serdes_get_lane(chip, port);
- if (lane < 0)
- return lane;
-
- err = mv88e6xxx_serdes_power_up(chip, port, lane);
- if (err)
- return err;
-
- if (chip->ports[port].serdes_irq) {
- err = mv88e6xxx_serdes_irq_enable(chip, port, lane);
- if (err)
- return err;
- }
}
return 0;
@@ -665,6 +636,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
@@ -1234,6 +1218,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
return err;
}
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
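
mv88e6xxx_port_set_lock() clears the whole two-bit SA-filtering field before optionally setting drop-on-lock, so unlocking lands back on the "disabled" encoding rather than a stale mode. A standalone sketch of that field update (the ctl0 variable is a stand-in, not the driver's register accessors):

#include <stdint.h>
#include <stdio.h>

#define SA_FILT_MASK		0xc000u
#define SA_FILT_DROP_ON_LOCK	0x4000u

static uint16_t ctl0 = 0x0080;	/* pretend an unrelated bit is set */

static void set_locked(int locked)
{
	uint16_t reg = ctl0;

	reg &= ~SA_FILT_MASK;			/* always back to "disabled" */
	if (locked)
		reg |= SA_FILT_DROP_ON_LOCK;	/* drop unknown SAs when locked */
	ctl0 = reg;
}

int main(void)
{
	set_locked(1);
	printf("locked:   0x%04x\n", ctl0);	/* 0x4080 */
	set_locked(0);
	printf("unlocked: 0x%04x\n", ctl0);	/* 0x0080 */
	return 0;
}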
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -1278,7 +1291,7 @@ int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
@@ -1287,7 +1300,10 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
@@ -1699,6 +1715,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
ptr = shift / 8;
shift %= 8;
mask >>= ptr * 8;
+ ptr <<= 8;
err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03382b66f800..c1d2f99efb1c 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with the PHY suffix use an output clock instead of an input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
@@ -42,6 +57,11 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
@@ -106,6 +126,8 @@
/* Offset 0x03: Switch Identifier Register */
#define MV88E6XXX_PORT_SWITCH_ID 0x03
#define MV88E6XXX_PORT_SWITCH_ID_PROD_MASK 0xfff0
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6020 0x0200
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6071 0x0710
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6085 0x04a0
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6095 0x0950
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6097 0x0990
@@ -128,6 +150,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6361 0x2610
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6321 0x3100
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6141 0x3400
@@ -142,7 +165,11 @@
/* Offset 0x04: Port Control Register */
#define MV88E6XXX_PORT_CTL0 0x04
#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
-#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
@@ -267,7 +294,7 @@
/* Offset 0x10: Extended Port Control Command */
#define MV88E6393X_PORT_EPC_CMD 0x10
#define MV88E6393X_PORT_EPC_CMD_BUSY 0x8000
-#define MV88E6393X_PORT_EPC_CMD_WRITE 0x0300
+#define MV88E6393X_PORT_EPC_CMD_WRITE 0x3000
#define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE 0x02
/* Offset 0x11: Extended Port Control Data */
@@ -282,6 +309,130 @@
/* Offset 0x13: OutFiltered Counter */
#define MV88E6XXX_PORT_OUT_FILTERED 0x13
+/* Offset 0x16: LED Control */
+#define MV88E6XXX_PORT_LED_CONTROL 0x16
+#define MV88E6XXX_PORT_LED_CONTROL_UPDATE BIT(15)
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_MASK GENMASK(14, 12)
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_LED01_CTRL (0x00 << 12) /* Control for LED 0 and 1 */
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_STRETCH_BLINK (0x06 << 12) /* Stretch and Blink Rate */
+#define MV88E6XXX_PORT_LED_CONTROL_POINTER_CNTL_SPECIAL (0x07 << 12) /* Control for the Port's Special LED */
+#define MV88E6XXX_PORT_LED_CONTROL_DATA_MASK GENMASK(10, 0)
+/* Selection masks valid for either port 1,2,3,4 or 5 */
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL_MASK GENMASK(3, 0)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL_MASK GENMASK(7, 4)
+/* Selection control for LED 0 and 1; ports 5 and 6 only have LED 0
+ * Bits Function
+ * 0..3 LED 0 control selector on ports 1-5
+ * 4..7 LED 1 control selector on ports 1-4; on port 5 this controls LED 0 of port 6
+ *
+ * Sel Port LED Function for the 6352 family:
+ * 0 1-4 0 Link/Act/Speed by Blink Rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 1-4 1 Port 2's Special LED
+ * 5-6 0 Port 5 Link/Act (off=no link, on=link, blink=activity)
+ * 5-6 1 Port 6 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 1 1-4 0 100/1000 Link/Act (off=no link, on=100 or 1000 link, blink=activity)
+ * 1-4 1 10/100 Link Act (off=no link, on=10 or 100 link, blink=activity)
+ * 5-6 0 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity)
+ * 5-6 1 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 2 1-4 0 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 1-4 1 10/100 Link/Act (off=no link, on=10 or 100 link, blink=activity)
+ * 5-6 0 Fiber 1000 Link/Act (off=no link, on=link 1000, blink=activity)
+ * 5-6 1 Fiber 100 Link/Act (off=no link, on=link 100, blink=activity)
+ * 3 1-4 0 Link/Act (off=no link, on=link, blink=activity)
+ * 1-4 1 1000 Link (off=no link, on=1000 link)
+ * 5-6 0 Port 0's Special LED
+ * 5-6 1 Fiber Link (off=no link, on=link)
+ * 4 1-4 0 Port 0's Special LED
+ * 1-4 1 Port 1's Special LED
+ * 5-6 0 Port 1's Special LED
+ * 5-6 1 Port 5 Link/Act (off=no link, on=link, blink=activity)
+ * 5 1-4 0 Reserved
+ * 1-4 1 Reserved
+ * 5-6 0 Port 2's Special LED
+ * 5-6 1 Port 6 Link (off=no link, on=link)
+ * 6 1-4 0 Duplex/Collision (off=half-duplex, on=full-duplex, blink=collision)
+ * 1-4 1 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity)
+ * 5-6 0 Port 5 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col)
+ * 5-6 1 Port 6 Duplex/Collision (off=half-duplex, on=full-duplex, blink=col)
+ * 7 1-4 0 10/1000 Link/Act (off=no link, on=10 or 1000 link, blink=activity)
+ * 1-4 1 10/1000 Link (off=no link, on=10 or 1000 link)
+ * 5-6 0 Port 5 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 5-6 1 Port 6 Link/Act/Speed by Blink rate (off=no link, on=link, blink=activity, blink speed=link speed)
+ * 8 1-4 0 Link (off=no link, on=link)
+ * 1-4 1 Activity (off=no link, blink on=activity)
+ * 5-6 0 Port 6 Link/Act (off=no link, on=link, blink=activity)
+ * 5-6 1 Port 0's Special LED
+ * 9 1-4 0 10 Link (off=no link, on=10 link)
+ * 1-4 1 100 Link (off=no link, on=100 link)
+ * 5-6 0 Reserved
+ * 5-6 1 Port 1's Special LED
+ * a 1-4 0 10 Link/Act (off=no link, on=10 link, blink=activity)
+ * 1-4 1 100 Link/Act (off=no link, on=100 link, blink=activity)
+ * 5-6 0 Reserved
+ * 5-6 1 Port 2's Special LED
+ * b 1-4 0 100/1000 Link (off=no link, on=100 or 1000 link)
+ * 1-4 1 10/100 Link (off=no link, on=100 link, blink=activity)
+ * 5-6 0 Reserved
+ * 5-6 1 Reserved
+ * c * * PTP Act (blink on=PTP activity)
+ * d * * Force Blink
+ * e * * Force Off
+ * f * * Force On
+ */
+/* Select LED0 output */
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL0 0x0
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL1 0x1
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL2 0x2
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL3 0x3
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL4 0x4
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL5 0x5
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL6 0x6
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL7 0x7
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL8 0x8
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SEL9 0x9
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELA 0xa
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELB 0xb
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELC 0xc
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELD 0xd
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELE 0xe
+#define MV88E6XXX_PORT_LED_CONTROL_LED0_SELF 0xf
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL0 (0x0 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL1 (0x1 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL2 (0x2 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL3 (0x3 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL4 (0x4 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL5 (0x5 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL6 (0x6 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL7 (0x7 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL8 (0x8 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SEL9 (0x9 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELA (0xa << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELB (0xb << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELC (0xc << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELD (0xd << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELE (0xe << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_LED1_SELF (0xf << 4)
+/* Stretch and Blink Rate Control (Index 0x06 of LED Control) */
+/* Pulse Stretch Selection for all LED's on this port */
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_NONE (0 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_21MS (1 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_42MS (2 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_84MS (3 << 4)
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_PULSE_STRETCH_168MS (4 << 4)
+/* Blink Rate Selection for all LEDs on this port */
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_21MS 0
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_42MS 1
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_84MS 2
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_168MS 3
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_336MS 4
+#define MV88E6XXX_PORT_LED_CONTROL_0x06_BLINK_RATE_672MS 5
+/* Control for Special LED (Index 0x7 of LED Control on Port 0) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P0_LAN_LINKACT_SHIFT 0 /* bits 6:0 LAN Link Activity LED */
+/* Control for Special LED (Index 0x7 of LED Control on Port 1) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P1_WAN_LINKACT_SHIFT 0 /* bits 6:0 WAN Link Activity LED */
+/* Control for Special LED (Index 0x7 of LED Control on Port 2) */
+#define MV88E6XXX_PORT_LED_CONTROL_0x07_P2_PTP_ACT 0 /* bits 6:0 PTP Activity */
+
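
Putting the fields above together, one write to the LED Control register carries the UPDATE bit, a pointer selecting the sub-register, and the data in the low bits. A user-space sketch composing such a word for the LED 0/1 control (selector values chosen arbitrarily; the driver would issue this via a port register write):

#include <stdint.h>
#include <stdio.h>

#define LED_UPDATE	0x8000u		/* BIT(15) */
#define LED_PTR_LED01	(0x0u << 12)	/* pointer: control for LED 0 and 1 */
#define LED0_SEL(x)	((x) & 0xfu)
#define LED1_SEL(x)	(((x) & 0xfu) << 4)

int main(void)
{
	uint16_t reg = LED_UPDATE | LED_PTR_LED01 |
		       LED0_SEL(0x3) | LED1_SEL(0x8);

	printf("LED control write: 0x%04x\n", reg);	/* 0x8083 */
	return 0;
}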
/* Offset 0x18: IEEE Priority Mapping Table */
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18
#define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000
@@ -323,6 +474,8 @@ int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
int pause);
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
@@ -333,8 +486,6 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
@@ -350,10 +501,14 @@ int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
-phy_interface_t mv88e6341_port_max_speed_mode(int port);
-phy_interface_t mv88e6390_port_max_speed_mode(int port);
-phy_interface_t mv88e6390x_port_max_speed_mode(int port);
-phy_interface_t mv88e6393x_port_max_speed_mode(int port);
+phy_interface_t mv88e6341_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port);
+phy_interface_t mv88e6390_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port);
+phy_interface_t mv88e6390x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port);
+phy_interface_t mv88e6393x_port_max_speed_mode(struct mv88e6xxx_chip *chip,
+ int port);
int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
@@ -365,6 +520,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode);
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
@@ -423,9 +581,18 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+#ifdef CONFIG_NET_DSA_MV88E6XXX_LEDS
+int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port);
+#else
+static inline int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ return 0;
+}
+#endif
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
index b49d05f0e117..7a9f9ff6dedf 100644
--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
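
The fix above matters because the Reserved-1A busy flag lives in port register space, so it must be polled through the port accessor rather than the global mv88e6xxx_wait_bit(). A generic poll-until-clear sketch of the wait-bit idiom, with a stub reg_read() and a millisecond budget in place of the driver's timeout handling:

#include <stdint.h>
#include <unistd.h>

static int reg_read(int port, int reg, uint16_t *val)
{
	(void)port; (void)reg;
	*val = 0;		/* stub: busy bit already clear */
	return 0;
}

static int wait_bit_clear(int port, int reg, int bit, int budget_ms)
{
	uint16_t val;
	int err;

	while (budget_ms--) {
		err = reg_read(port, reg, &val);
		if (err)
			return err;
		if (!(val & (1u << bit)))
			return 0;
		usleep(1000);
	}
	return -110;		/* -ETIMEDOUT */
}

int main(void)
{
	return wait_bit_clear(0x1f, 0x1a, 15, 100);
}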
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index d838c174dc0d..f7603573d3a9 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -11,12 +11,20 @@
*/
#include "chip.h"
+#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
#define MV88E6XXX_MAX_ADJ_PPB 1000000
+struct mv88e6xxx_cc_coeffs {
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
+};
+
/* Family MV88E6250:
* Raw timestamps are in units of 10-ns clock periods.
*
@@ -24,22 +32,43 @@
* simplifies to
* clkadj = scaled_ppm * 2^7 / 5^5
*/
-#define MV88E6250_CC_SHIFT 28
-#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
-#define MV88E6250_CC_MULT_NUM (1 << 7)
-#define MV88E6250_CC_MULT_DEM 3125ULL
+#define MV88E6XXX_CC_10NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT,
+ .cc_mult_num = 1 << 7,
+ .cc_mult_dem = 3125ULL,
+};
-/* Other families:
+/* Other families except MV88E6393X in internal clock mode:
* Raw timestamps are in units of 8-ns clock periods.
*
* clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
* simplifies to
* clkadj = scaled_ppm * 2^9 / 5^6
*/
-#define MV88E6XXX_CC_SHIFT 28
-#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
-#define MV88E6XXX_CC_MULT_NUM (1 << 9)
-#define MV88E6XXX_CC_MULT_DEM 15625ULL
+#define MV88E6XXX_CC_8NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT,
+ .cc_mult_num = 1 << 9,
+ .cc_mult_dem = 15625ULL
+};
+
+/* Family MV88E6393X using internal clock:
+ * Raw timestamps are in units of 4-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^8 / 5^6
+ */
+#define MV88E6XXX_CC_4NS_SHIFT 28
+static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = {
+ .cc_shift = MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT,
+ .cc_mult_num = 1 << 8,
+ .cc_mult_dem = 15625ULL
+};
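
All three coefficient sets feed the same adjustment formula used later in mv88e6xxx_ptp_adjfine(): diff = scaled_ppm * cc_mult_num / cc_mult_dem, added to or subtracted from cc_mult. A standalone check of that arithmetic for the 4 ns coefficients (scaled_ppm carries 16 fractional bits, so 65536 requests exactly +1 ppm):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t cc_mult = 4u << 28;	/* nominal multiplier */
	const uint32_t cc_mult_num = 1u << 8;
	const uint64_t cc_mult_dem = 15625ULL;	/* 5^6 */
	long scaled_ppm = 65536;		/* +1 ppm */
	int neg = scaled_ppm < 0;
	uint64_t adj;
	uint32_t diff, mult;

	if (neg)
		scaled_ppm = -scaled_ppm;

	adj = (uint64_t)cc_mult_num * (uint64_t)scaled_ppm;
	diff = (uint32_t)(adj / cc_mult_dem);
	mult = neg ? cc_mult - diff : cc_mult + diff;

	/* 65536 * 256 / 15625 = 1073, i.e. ~1 ppm of 4 << 28 */
	printf("diff=%u mult=%u (nominal %u)\n", diff, mult, cc_mult);
	return 0;
}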
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
@@ -82,13 +111,40 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
}
-static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
+static const struct mv88e6xxx_cc_coeffs *
+mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
+{
+ u16 period_ps;
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1);
+ if (err) {
+ dev_err(chip->dev, "failed to read cycle counter period: %d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ switch (period_ps) {
+ case 4000:
+ return &mv88e6xxx_cc_4ns_coeffs;
+ case 8000:
+ return &mv88e6xxx_cc_8ns_coeffs;
+ case 10000:
+ return &mv88e6xxx_cc_10ns_coeffs;
+ default:
+ dev_err(chip->dev, "unexpected cycle counter period of %u ps\n",
+ period_ps);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
+static u64 mv88e6352_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -96,13 +152,13 @@ static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
return ((u32)phc_time[1] << 16) | phc_time[0];
}
-static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6165_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -111,42 +167,26 @@ static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc)
}
/* mv88e6352_config_eventcap - configure TAI event capture
- * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
- int rising)
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int rising)
{
- u16 global_config;
- u16 cap_config;
+ u16 evcap_config;
int err;
- chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
- MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ evcap_config = MV88E6352_TAI_CFG_CAP_OVERWRITE |
+ MV88E6352_TAI_CFG_CAP_CTR_START;
if (!rising)
- chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+ evcap_config |= MV88E6352_TAI_CFG_EVREQ_FALLING;
- global_config = (chip->evcap_config | chip->trig_config);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_CFG, evcap_config);
if (err)
return err;
- if (event == PTP_CLOCK_PPS) {
- cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
- } else if (event == PTP_CLOCK_EXTTS) {
- /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
- cap_config = 0;
- } else {
- return -EINVAL;
- }
-
/* Write the capture config; this also clears the capture counter */
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
- cap_config);
-
- return err;
+ return mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, 0);
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
@@ -159,7 +199,7 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
@@ -167,20 +207,24 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
- if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ if (status[0] & MV88E6352_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
- if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ if (!(status[0] & MV88E6352_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
- status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ status[0] &= ~MV88E6352_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
+ if (err) {
+ dev_err(chip->dev, "failed to write TAI status register\n");
+ return;
+ }
/* This is an external timestamp */
ev.type = PTP_CLOCK_EXTTS;
@@ -199,7 +243,6 @@ out:
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
- const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
@@ -209,10 +252,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- mult = ptp_ops->cc_mult;
- adj = ptp_ops->cc_mult_num;
+ mult = chip->cc_coeffs->cc_mult;
+ adj = chip->cc_coeffs->cc_mult_num;
adj *= scaled_ppm;
- diff = div_u64(adj, ptp_ops->cc_mult_dem);
+ diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
@@ -273,13 +316,6 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -303,7 +339,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
@@ -359,13 +395,9 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
-const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
@@ -386,19 +418,16 @@ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6250_CC_SHIFT,
- .cc_mult = MV88E6250_CC_MULT,
- .cc_mult_num = MV88E6250_CC_MULT_NUM,
- .cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
-const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
.event_work = mv88e6352_tai_event_work,
.port_enable = mv88e6352_hwtstamp_port_enable,
.port_disable = mv88e6352_hwtstamp_port_disable,
+ .set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port,
.n_ext_ts = 1,
.arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
.arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
@@ -413,13 +442,9 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
- .cc_shift = MV88E6XXX_CC_SHIFT,
- .cc_mult = MV88E6XXX_CC_MULT,
- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
-static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+static u64 mv88e6xxx_ptp_clock_read(struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -429,10 +454,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
return 0;
}
-/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2
* seconds; this task forces periodic reads so that we don't miss any.
*/
-#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 8)
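
A quick sanity check of the halved period, assuming the fastest 4 ns (250 MHz) tick now supported; this is arithmetic only, not driver code:

#include <stdio.h>

int main(void)
{
	/* 32-bit counter at a 4 ns tick wraps after 2^32 * 4 ns. */
	double wrap_s = 4294967296.0 * 4e-9;

	printf("counter wraps every %.2f s\n", wrap_s);	/* ~17.18 */
	return 0;	/* so a forced read every 8 s leaves ample margin */
}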
static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
@@ -451,11 +476,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
int i;
/* Set up the cycle counter */
+ chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip);
+ if (IS_ERR(chip->cc_coeffs))
+ return PTR_ERR(chip->cc_coeffs);
+
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
- chip->tstamp_cc.mult = ptp_ops->cc_mult;
- chip->tstamp_cc.shift = ptp_ops->cc_shift;
+ chip->tstamp_cc.mult = chip->cc_coeffs->cc_mult;
+ chip->tstamp_cc.shift = chip->cc_coeffs->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
@@ -491,6 +520,27 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ chip->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (ptp_ops->set_ptp_cpu_port) {
+ struct dsa_port *dp;
+ int upstream = 0;
+ int err;
+
+ dsa_switch_for_each_user_port(dp, chip->ds) {
+ upstream = dsa_upstream_port(chip->ds, dp->index);
+ break;
+ }
+
+ err = ptp_ops->set_ptp_cpu_port(chip, upstream);
+ if (err) {
+ dev_err(chip->dev, "Failed to set PTP CPU destination port!\n");
+ return err;
+ }
+ }
+
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
@@ -501,6 +551,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+/* This must never be called holding the register lock */
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 269d5d16a466..95bdddb0bf39 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -16,132 +16,56 @@
#include "chip.h"
/* Offset 0x00: TAI Global Config */
-#define MV88E6XXX_TAI_CFG 0x00
-#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
-#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
-#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
-#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
-#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
-#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
-#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
-#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
-#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
-#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
-#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
-#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+#define MV88E6352_TAI_CFG 0x00
+#define MV88E6352_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6352_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6352_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6352_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6352_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6352_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6352_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6352_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6352_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6352_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6352_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6352_TAI_CFG_TRIG_ENABLE 0x0001
/* Offset 0x01: Timestamp Clock Period (ps) */
#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
-/* Offset 0x02/0x03: Trigger Generation Amount */
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
-
-/* Offset 0x04: Clock Compensation */
-#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
-
-/* Offset 0x05: Trigger Configuration */
-#define MV88E6XXX_TAI_TRIG_CFG 0x05
-
-/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
-#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
-
-/* Offset 0x07: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP 0x07
-
-/* Offset 0x08: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
-
/* Offset 0x09: Event Status */
-#define MV88E6XXX_TAI_EVENT_STATUS 0x09
-#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
-#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
-#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
-#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
-
-/* Offset 0x0A/0x0B: Event Time */
-#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
-#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b
+#define MV88E6352_TAI_EVENT_STATUS 0x09
+#define MV88E6352_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6352_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6352_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+/* Offset 0x0A/0x0B: Event Time Lo/Hi. Always read with Event Status. */
/* Offset 0x0E/0x0F: PTP Global Time */
-#define MV88E6XXX_TAI_TIME_LO 0x0e
-#define MV88E6XXX_TAI_TIME_HI 0x0f
-
-/* Offset 0x10/0x11: Trig Generation Time */
-#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
-#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
-
-/* Offset 0x12: Lock Status */
-#define MV88E6XXX_TAI_LOCK_STATUS 0x12
-
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
+#define MV88E6352_TAI_TIME_LO 0x0e
+#define MV88E6352_TAI_TIME_HI 0x0f
/* 6165 Global Control Registers */
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
-
-/* Offset 0x01: Message ID */
-#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
-
-/* Offset 0x02: Time Stamp Arrive Time */
-#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
-
-/* Offset 0x03: Port Arrival Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
-
-/* Offset 0x04: Port Departure Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
-
-/* Offset 0x05: Configuration */
-#define MV88E6XXX_PTP_GC_CONFIG 0x05
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
-
-/* Offset 0x8: Interrupt Status */
-#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
-
/* Offset 0x9/0xa: Global Time */
-#define MV88E6XXX_PTP_GC_TIME_LO 0x09
-#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+#define MV88E6165_PTP_GC_TIME_LO 0x09
+#define MV88E6165_PTP_GC_TIME_HI 0x0A
-/* 6165 Per Port Registers */
+/* 6165 Per Port Registers. The arrival and departure registers are a
+ * common block consisting of status, two time registers and the sequence ID
+ */
/* Offset 0: Arrival Time 0 Status */
#define MV88E6165_PORT_PTP_ARR0_STS 0x00
-/* Offset 0x01/0x02: PTP Arrival 0 Time */
-#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
-#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
-
-/* Offset 0x03: PTP Arrival 0 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
-
/* Offset 0x04: PTP Arrival 1 Status */
#define MV88E6165_PORT_PTP_ARR1_STS 0x04
-/* Offset 0x05/0x6E: PTP Arrival 1 Time */
-#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
-#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
-
-/* Offset 0x07: PTP Arrival 1 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
-
/* Offset 0x08: PTP Departure Status */
#define MV88E6165_PORT_PTP_DEP_STS 0x08
-/* Offset 0x09/0x0a: PTP Deperture Time */
-#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
-#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
-
-/* Offset 0x0b: PTP Departure Sequence ID */
-#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
-
/* Offset 0x0d: Port Status */
#define MV88E6164_PORT_STATUS 0x0d
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
-long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
@@ -149,16 +73,11 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
ptp_clock_info)
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
-extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
-{
- return -1;
-}
-
static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
return 0;
@@ -169,8 +88,8 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
-static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 6ea003678798..b3330211edbc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -36,25 +36,28 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
-static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
- int lane, int device, int reg, u16 val)
+int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa,
+ u16 status, struct phylink_link_state *state)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
+ state->link = false;
- return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
-}
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
-static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 status, u16 lpa,
- struct phylink_link_state *state)
-{
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
- state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ /* The Speed and Duplex Resolved register is 1 if AN is enabled
+ * and complete, or if AN is disabled. So with AN disabled we
+ * still get here on link up.
+ */
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -78,9 +81,21 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
state->speed = SPEED_10;
break;
default:
- dev_err(chip->dev, "invalid PHY speed\n");
+ dev_err(dev, "invalid PHY speed\n");
return -EINVAL;
}
+ } else if (state->link &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ /* If the Speed and Duplex Resolved register is 0 and link is up, it
+ * means that AN was enabled, but the link partner had it disabled
+ * and the PHY invoked the Auto-Negotiation Bypass feature and
+ * linked anyway.
+ */
+ state->duplex = DUPLEX_FULL;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
} else {
state->link = false;
}
@@ -95,162 +110,6 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
return 0;
}
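
The branch structure above distinguishes three outcomes; a condensed classification sketch (a hypothetical helper mirroring the logic, not the driver API):

#include <stdbool.h>
#include <stdio.h>

enum link_res { LINK_DOWN, LINK_RESOLVED, LINK_AN_BYPASS };

static enum link_res classify(bool bmsr_lstatus, bool spd_dpl_valid,
			      bool is_sgmii)
{
	if (!bmsr_lstatus)
		return LINK_DOWN;	/* BMSR says the link failed */
	if (spd_dpl_valid)
		return LINK_RESOLVED;	/* AN complete, or AN disabled */
	if (!is_sgmii)
		return LINK_AN_BYPASS;	/* partner had AN off, PHY bypassed */
	return LINK_DOWN;
}

int main(void)
{
	printf("%d\n", classify(true, false, false));	/* 2: AN bypass */
	return 0;
}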
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (up)
- new_val = val & ~BMCR_PDOWN;
- else
- new_val = val | BMCR_PDOWN;
-
- if (val != new_val)
- err = mv88e6352_serdes_write(chip, MII_BMCR, new_val);
-
- return err;
-}
-
-int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
-{
- u16 adv, bmcr, val;
- bool changed;
- int err;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- adv = 0x0001;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
- break;
-
- default:
- return 0;
- }
-
- err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val);
- if (err)
- return err;
-
- changed = val != adv;
- if (changed) {
- err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv);
- if (err)
- return err;
- }
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (phylink_autoneg_inband(mode))
- bmcr = val | BMCR_ANENABLE;
- else
- bmcr = val & ~BMCR_ANENABLE;
-
- if (bmcr == val)
- return changed;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
-}
-
-int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- u16 lpa, status;
- int err;
-
- err = mv88e6352_serdes_read(chip, 0x11, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
- return err;
- }
-
- err = mv88e6352_serdes_read(chip, MII_LPA, &lpa);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
- return err;
- }
-
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
-}
-
-int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u16 bmcr;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr);
- if (err)
- return err;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART);
-}
-
-int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex)
-{
- u16 val, bmcr;
- int err;
-
- err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
- switch (speed) {
- case SPEED_1000:
- bmcr |= BMCR_SPEED1000;
- break;
- case SPEED_100:
- bmcr |= BMCR_SPEED100;
- break;
- case SPEED_10:
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
-
- if (bmcr == val)
- return 0;
-
- return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
-}
-
-int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
-{
- u8 cmode = chip->ports[port].cmode;
- int lane = -ENODEV;
-
- if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII))
- lane = 0xff; /* Unused */
-
- return lane;
-}
-
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
-{
- if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
- return true;
-
- return false;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -264,25 +123,28 @@ static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6352_port_has_serdes(chip, port))
- return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+ int err;
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
-int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data)
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data)
{
struct mv88e6352_serdes_hw_stat *stat;
- int i;
+ int err, i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
- memcpy(data + i * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
+ ethtool_puts(data, stat->string);
}
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
@@ -314,15 +176,16 @@ static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
return val;
}
-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
u64 value;
- int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return 0;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
@@ -338,51 +201,6 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
-static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
-{
- u16 bmsr;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
-}
-
-irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
- if (err)
- return ret;
-
- if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
- ret = IRQ_HANDLED;
- mv88e6352_serdes_irq_link(chip, port);
- }
-
- return ret;
-}
-
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6352_SERDES_INT_LINK_CHANGE;
-
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val);
-}
-
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
@@ -390,8 +208,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
return 32 * sizeof(u16);
}
@@ -403,7 +226,8 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int err;
int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
@@ -430,115 +254,6 @@ int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- /* The serdes power can't be controlled on this switch chip but we need
- * to supply this function to avoid returning -EOPNOTSUPP in
- * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down
- */
- return 0;
-}
-
-int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
-{
- /* There are no configurable serdes lanes on this switch chip but we
- * need to return a non-negative lane number so that callers of
- * mv88e6xxx_serdes_get_lane() know this is a serdes port.
- */
- switch (chip->ports[port].cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- int err;
- u16 status;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
- if (err)
- return err;
-
- state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
-
- if (state->link) {
- state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF;
-
- switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
- case MV88E6XXX_PORT_STS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- case MV88E6XXX_PORT_STS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MV88E6XXX_PORT_STS_SPEED_10:
- state->speed = SPEED_10;
- break;
- default:
- dev_err(chip->dev, "invalid PHY speed\n");
- return -EINVAL;
- }
- } else {
- state->duplex = DUPLEX_UNKNOWN;
- state->speed = SPEED_UNKNOWN;
- }
-
- return 0;
-}
-
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- /* The serdes interrupts are enabled in the G2_INT_MASK register. We
- * need to return 0 to avoid returning -EOPNOTSUPP in
- * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable
- */
- switch (cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
-{
- u16 status;
- int err;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
- if (err) {
- dev_err(chip->dev, "can't read port status: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK));
-}
-
-irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6185_PORT_STS_CMODE_SERDES:
- case MV88E6185_PORT_STS_CMODE_1000BASE_X:
- mv88e6097_serdes_irq_link(chip, port);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -652,63 +367,13 @@ int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode == MV88E6393X_PORT_STS_CMODE_5GBASER ||
- cmode == MV88E6393X_PORT_STS_CMODE_10GBASER)
+ cmode == MV88E6393X_PORT_STS_CMODE_10GBASER ||
+ cmode == MV88E6393X_PORT_STS_CMODE_USXGMII)
lane = port;
return lane;
}
-/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_CTRL1, &val);
-
- if (err)
- return err;
-
- if (up)
- new_val = val & ~(MDIO_CTRL1_RESET |
- MDIO_PCS_CTRL1_LOOPBACK |
- MDIO_CTRL1_LPOWER);
- else
- new_val = val | MDIO_CTRL1_LPOWER;
-
- if (val != new_val)
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_CTRL1, new_val);
-
- return err;
-}
-
-/* Set power up/down for SGMII and 1000Base-X */
-static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
- bool up)
-{
- u16 val, new_val;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- if (up)
- new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN);
- else
- new_val = val | BMCR_PDOWN;
-
- if (val != new_val)
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, new_val);
-
- return err;
-}
-
struct mv88e6390_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int reg;
@@ -728,8 +393,8 @@ int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
-int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data)
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data)
{
struct mv88e6390_serdes_hw_stat *stat;
int i;
@@ -739,8 +404,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
stat = &mv88e6390_serdes_hw_stats[i];
- memcpy(data + i * ETH_GSTRING_LEN, stat->string,
- ETH_GSTRING_LEN);
+ ethtool_puts(data, stat->string);
}
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
@@ -763,8 +427,8 @@ static uint64_t mv88e6390_serdes_get_stat(struct mv88e6xxx_chip *chip, int lane,
return reg[0] | ((u64)reg[1] << 16) | ((u64)reg[2] << 32);
}
-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
struct mv88e6390_serdes_hw_stat *stat;
int lane;
@@ -782,434 +446,6 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
}
-static int mv88e6390_serdes_enable_checker(struct mv88e6xxx_chip *chip, int lane)
-{
- u16 reg;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PG_CONTROL, &reg);
- if (err)
- return err;
-
- reg |= MV88E6390_PG_CONTROL_ENABLE_PC;
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_PG_CONTROL, reg);
-}
-
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up)
-{
- u8 cmode = chip->ports[port].cmode;
- int err = 0;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_power_sgmii(chip, lane, up);
- break;
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- err = mv88e6390_serdes_power_10g(chip, lane, up);
- break;
- }
-
- if (!err && up)
- err = mv88e6390_serdes_enable_checker(chip, lane);
-
- return err;
-}
-
-int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise)
-{
- u16 val, bmcr, adv;
- bool changed;
- int err;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- adv = 0x0001;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- adv = linkmode_adv_to_mii_adv_x(advertise,
- ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
- break;
-
- default:
- return 0;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_ADVERTISE, &val);
- if (err)
- return err;
-
- changed = val != adv;
- if (changed) {
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_ADVERTISE, adv);
- if (err)
- return err;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- if (phylink_autoneg_inband(mode))
- bmcr = val | BMCR_ANENABLE;
- else
- bmcr = val & ~BMCR_ANENABLE;
-
- /* setting ANENABLE triggers a restart of negotiation */
- if (bmcr == val)
- return changed;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, bmcr);
-}
-
-static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
- int port, int lane, struct phylink_link_state *state)
-{
- u16 lpa, status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_PHY_STATUS, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
- return err;
- }
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_LPA, &lpa);
- if (err) {
- dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
- return err;
- }
-
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
-}
-
-static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
- int port, int lane, struct phylink_link_state *state)
-{
- u16 status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err)
- return err;
-
- state->link = !!(status & MDIO_STAT1_LSTATUS);
- if (state->link) {
- state->speed = SPEED_10000;
- state->duplex = DUPLEX_FULL;
- }
-
- return 0;
-}
-
-static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
- int port, int lane,
- struct phylink_link_state *state)
-{
- u16 status;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err)
- return err;
-
- state->link = !!(status & MDIO_STAT1_LSTATUS);
- if (state->link) {
- if (state->interface == PHY_INTERFACE_MODE_5GBASER)
- state->speed = SPEED_5000;
- else
- state->speed = SPEED_10000;
- state->duplex = DUPLEX_FULL;
- }
-
- return 0;
-}
-
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
- state);
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_RXAUI:
- return mv88e6390_serdes_pcs_get_state_10g(chip, port, lane,
- state);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state)
-{
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane,
- state);
- case PHY_INTERFACE_MODE_5GBASER:
- case PHY_INTERFACE_MODE_10GBASER:
- return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane,
- state);
-
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u16 bmcr;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &bmcr);
- if (err)
- return err;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR,
- bmcr | BMCR_ANRESTART);
-}
-
-int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex)
-{
- u16 val, bmcr;
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &val);
- if (err)
- return err;
-
- bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
- switch (speed) {
- case SPEED_2500:
- case SPEED_1000:
- bmcr |= BMCR_SPEED1000;
- break;
- case SPEED_100:
- bmcr |= BMCR_SPEED100;
- break;
- case SPEED_10:
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- bmcr |= BMCR_FULLDPLX;
-
- if (bmcr == val)
- return 0;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, bmcr);
-}
-
-static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- u16 bmsr;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMSR, &bmsr);
- if (err) {
- dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
-}
-
-static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
-{
- u16 status;
- int err;
-
- /* If the link has dropped, we want to know about it. */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_10G_STAT1, &status);
- if (err) {
- dev_err(chip->dev, "can't read Serdes STAT1: %d\n", err);
- return;
- }
-
- dsa_port_phylink_mac_change(chip->ds, port, !!(status & MDIO_STAT1_LSTATUS));
-}
-
-static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
- int lane, bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_ENABLE, val);
-}
-
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
- }
-
- return 0;
-}
-
-static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
- int lane, u16 *status)
-{
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_STATUS, status);
-
- return err;
-}
-
-static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip,
- u8 lane, bool enable)
-{
- u16 val = 0;
-
- if (enable)
- val |= MV88E6393X_10G_INT_LINK_CHANGE;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_10G_INT_ENABLE, val);
-}
-
-int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane, bool enable)
-{
- u8 cmode = chip->ports[port].cmode;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable);
- }
-
- return 0;
-}
-
-static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip,
- u8 lane, u16 *status)
-{
- int err;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_10G_INT_STATUS, status);
-
- return err;
-}
-
-irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
- if (err)
- return ret;
- if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP)) {
- ret = IRQ_HANDLED;
- mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
- }
- break;
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- err = mv88e6393x_serdes_irq_status_10g(chip, lane, &status);
- if (err)
- return err;
- if (status & MV88E6393X_10G_INT_LINK_CHANGE) {
- ret = IRQ_HANDLED;
- mv88e6393x_serdes_irq_link_10g(chip, port, lane);
- }
- break;
- }
-
- return ret;
-}
-
-irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
- irqreturn_t ret = IRQ_NONE;
- u16 status;
- int err;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
- if (err)
- return ret;
- if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP)) {
- ret = IRQ_HANDLED;
- mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
- }
- }
-
- return ret;
-}
-
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, port);
@@ -1271,101 +507,40 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+ /* Index of value in microvolts corresponds to the register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
{
- u16 reg, pcs;
+ bool found = false;
+ u16 ctrl, reg;
int err;
+ int i;
- /* mv88e6393x family errata 4.6:
- * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD
- * mode or P0_mode is configured for [x]MII.
- * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1.
- *
- * It seems that after this workaround the SERDES is automatically
- * powered up (the bit is cleared), so power it down.
- */
- if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
- lane == MV88E6393X_PORT10_LANE) {
- err = mv88e6390_serdes_read(chip, lane,
- MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &reg);
- if (err)
- return err;
-
- reg &= ~MV88E6393X_SERDES_POC_PDOWN;
- reg |= MV88E6393X_SERDES_POC_RESET;
-
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, reg);
- if (err)
- return err;
-
- err = mv88e6390_serdes_power_sgmii(chip, lane, false);
- if (err)
- return err;
- }
-
- /* mv88e6393x family errata 4.8:
- * When a SERDES port is operating in 1000BASE-X or SGMII mode link may
- * not come up after hardware reset or software reset of SERDES core.
- * Workaround is to write SERDES register 4.F074.14=1 for only those
- * modes and 0 in all other modes.
- */
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &pcs);
- if (err)
- return err;
-
- pcs &= MV88E6393X_SERDES_POC_PCS_MASK;
-
- err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_ERRATA_4_8_REG, &reg);
- if (err)
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return err;
- if (pcs == MV88E6393X_SERDES_POC_PCS_1000BASEX ||
- pcs == MV88E6393X_SERDES_POC_PCS_SGMII_PHY ||
- pcs == MV88E6393X_SERDES_POC_PCS_SGMII_MAC)
- reg |= MV88E6393X_ERRATA_4_8_BIT;
- else
- reg &= ~MV88E6393X_ERRATA_4_8_BIT;
-
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_ERRATA_4_8_REG, reg);
-}
-
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
-{
- int err;
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
- if (err)
- return err;
+ if (!found)
+ return -EINVAL;
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
if (err)
return err;
- return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
-}
-
-int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on)
-{
- u8 cmode = chip->ports[port].cmode;
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
- if (port != 0 && port != 9 && port != 10)
- return -EOPNOTSUPP;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, on);
- case MV88E6393X_PORT_STS_CMODE_5GBASER:
- case MV88E6393X_PORT_STS_CMODE_10GBASER:
- return mv88e6390_serdes_power_10g(chip, lane, on);
- }
-
- return 0;
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index cbb3ba30caea..ad887d8601bc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -12,6 +12,8 @@
#include "chip.h"
+struct phylink_link_state;
+
#define MV88E6352_ADDR_SERDES 0x0f
#define MV88E6352_SERDES_PAGE_FIBER 0x01
#define MV88E6352_SERDES_IRQ 0x0b
@@ -27,6 +29,8 @@
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -42,10 +46,18 @@
/* 10GBASE-R and 10GBASE-X4/X2 */
#define MV88E6390_10G_CTRL1 (0x1000 + MDIO_CTRL1)
#define MV88E6390_10G_STAT1 (0x1000 + MDIO_STAT1)
+#define MV88E6390_10G_INT_ENABLE 0x9001
+#define MV88E6390_10G_INT_LINK_DOWN BIT(3)
+#define MV88E6390_10G_INT_LINK_UP BIT(2)
+#define MV88E6390_10G_INT_STATUS 0x9003
#define MV88E6393X_10G_INT_ENABLE 0x9000
#define MV88E6393X_10G_INT_LINK_CHANGE BIT(2)
#define MV88E6393X_10G_INT_STATUS 0x9001
+/* USXGMII */
+#define MV88E6390_USXGMII_LP_STATUS 0xf0a2
+#define MV88E6390_USXGMII_PHY_STATUS 0xf0a6
+
/* 1000BASE-X and SGMII */
#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
#define MV88E6390_SGMII_BMSR (0x2000 + MII_BMSR)
@@ -93,85 +105,44 @@
#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
#define MV88E6393X_SERDES_POC_RESET BIT(15)
#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
+#define MV88E6393X_SERDES_POC_AN BIT(3)
+#define MV88E6393X_SERDES_CTRL1 0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8)
#define MV88E6393X_ERRATA_4_8_REG 0xF074
#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
-int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_pcs_decode_state(struct device *dev, u16 bmsr, u16 lpa,
+ u16 status, struct phylink_link_state *state);
+
int mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
-int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
- int lane, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertise);
-int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
- int lane, struct phylink_link_state *state);
-int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
-int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
- int lane, int speed, int duplex);
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
-int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool up);
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
- bool on);
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip);
-int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane,
- bool enable);
-int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane, bool enable);
-irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
-irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
- int lane);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
-int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data);
-int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
+size_t mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data);
-int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, int port,
+ uint8_t **data);
+size_t mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
@@ -182,24 +153,6 @@ static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
return chip->info->ops->serdes_get_lane(chip, port);
}
-static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_power)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_power(chip, port, lane, true);
-}
-
-static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_power)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_power(chip, port, lane, false);
-}
-
static inline unsigned int
mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
@@ -209,31 +162,9 @@ mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
return chip->info->ops->serdes_irq_mapping(chip, port);
}
-static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_enable)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_irq_enable(chip, port, lane, true);
-}
-
-static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
- int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_enable)
- return -EOPNOTSUPP;
-
- return chip->info->ops->serdes_irq_enable(chip, port, lane, false);
-}
-
-static inline irqreturn_t
-mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, int lane)
-{
- if (!chip->info->ops->serdes_irq_status)
- return IRQ_NONE;
-
- return chip->info->ops->serdes_irq_status(chip, port, lane);
-}
+extern const struct mv88e6xxx_pcs_ops mv88e6185_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6352_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6390_pcs_ops;
+extern const struct mv88e6xxx_pcs_ops mv88e6393x_pcs_ops;
#endif
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 282fe08db050..a990271b7482 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -55,11 +55,15 @@ static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- for (i = 0; i < 16; i++) {
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
@@ -67,7 +71,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (!!(data & BIT(bit)) == !!val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
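The jiffies-based loop above bounds the total polling time while still guaranteeing a minimum number of attempts. A user-space sketch of the same pattern, assuming CLOCK_MONOTONIC in place of jiffies:

#include <stdbool.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll until done() reports success or timeout_ms elapses, but make
 * at least two attempts even if the first check is already past the
 * deadline (e.g. because the caller was scheduled out), mirroring
 * the "|| (i < 2)" condition above.
 */
static bool poll_ready(bool (*done)(void), long long timeout_ms)
{
	const long long deadline = now_ms() + timeout_ms;
	int i;

	for (i = 0; now_ms() < deadline || i < 2; i++) {
		if (done())
			return true;
	}
	return false;	/* timed out */
}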
@@ -104,11 +111,6 @@ static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
@@ -132,11 +134,6 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
@@ -155,9 +152,20 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
MV88E6XXX_SMI_CMD, 15, 0);
}
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -175,5 +183,8 @@ int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
chip->bus = bus;
chip->sw_addr = sw_addr;
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
return 0;
}
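The new ->init hook moves the "wait until the chip is ready" step from every indirect read and write to a single call at setup time. A hedged sketch of the optional-op pattern (simplified, with hypothetical structure names):

struct bus_ops {
	int (*read)(void *ctx, int reg, unsigned short *val);
	int (*write)(void *ctx, int reg, unsigned short val);
	int (*init)(void *ctx);		/* optional one-time setup */
};

/* Call ->init once at attach time if the bus provides it; read/write
 * may then assume the device is idle on entry, since every access
 * leaves it idle on return.
 */
static int bus_attach(const struct bus_ops *ops, void *ctx)
{
	return ops->init ? ops->init(ctx) : 0;
}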
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.c b/drivers/net/dsa/mv88e6xxx/switchdev.c
new file mode 100644
index 000000000000..4c346a884fb2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * switchdev.c
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#include <net/switchdev.h>
+#include "chip.h"
+#include "global1.h"
+#include "switchdev.h"
+
+struct mv88e6xxx_fid_search_ctx {
+ u16 fid_search;
+ u16 vid_found;
+};
+
+static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv)
+{
+ struct mv88e6xxx_fid_search_ctx *ctx = priv;
+
+ if (ctx->fid_search == entry->fid) {
+ ctx->vid_found = entry->vid;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid)
+{
+ struct mv88e6xxx_fid_search_ctx ctx;
+ int err;
+
+ ctx.fid_search = fid;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx);
+ mv88e6xxx_reg_unlock(chip);
+ if (err < 0)
+ return err;
+ if (err == 1)
+ *vid = ctx.vid_found;
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry, u16 fid)
+{
+ struct switchdev_notifier_fdb_info info = {
+ .addr = entry->mac,
+ .locked = true,
+ };
+ struct net_device *brport;
+ struct dsa_port *dp;
+ u16 vid;
+ int err;
+
+ err = mv88e6xxx_find_vid(chip, fid, &vid);
+ if (err)
+ return err;
+
+ info.vid = vid;
+ dp = dsa_to_port(chip->ds, port);
+
+ rtnl_lock();
+ brport = dsa_port_to_bridge_port(dp);
+ if (!brport) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ brport, &info.info, NULL);
+ rtnl_unlock();
+
+ return err;
+}
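mv88e6xxx_find_vid() above depends on the walker's return-value convention. A generic, self-contained sketch of that walk-and-stop idiom (hypothetical table type, not the VTU code itself): the callback returns 1 to stop on a match, a negative errno to abort, and 0 to continue, and the walker propagates the first non-zero result.

struct vtu_entry {
	unsigned short fid;
	unsigned short vid;
};

static int vtu_walk(const struct vtu_entry *tbl, int n,
		    int (*cb)(const struct vtu_entry *entry, void *priv),
		    void *priv)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = cb(&tbl[i], priv);
		if (ret)	/* 1 == found, < 0 == error */
			return ret;
	}
	return 0;
}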
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.h b/drivers/net/dsa/mv88e6xxx/switchdev.h
new file mode 100644
index 000000000000..62214f9d62b0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * switchdev.h
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#ifndef _MV88E6XXX_SWITCHDEV_H_
+#define _MV88E6XXX_SWITCHDEV_H_
+
+#include "chip.h"
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry,
+ u16 fid);
+
+#endif /* _MV88E6XXX_SWITCHDEV_H_ */
diff --git a/drivers/net/dsa/mv88e6xxx/trace.c b/drivers/net/dsa/mv88e6xxx/trace.c
new file mode 100644
index 000000000000..7833cb50ca5d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2022 NXP
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
new file mode 100644
index 000000000000..5bd015b2b97a
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2022 NXP
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mv88e6xxx
+
+#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MV88E6XXX_TRACE_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+
+ TP_ARGS(dev, spid, portvec, addr, fid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, portvec)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, fid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->spid = spid;
+ __entry->portvec = portvec;
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->fid = fid;
+ ),
+
+ TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
+ __get_str(name), __entry->spid, __entry->portvec,
+ __entry->addr, __entry->fid)
+);
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+
+ TP_ARGS(dev, spid, vid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->spid = spid;
+ __entry->vid = vid;
+ ),
+
+ TP_printk("dev %s spid %d vid %u",
+ __get_str(name), __entry->spid, __entry->vid)
+);
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+#endif /* _MV88E6XXX_TRACE_H */
+
+/* We don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
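A rough user-space analogue of the DECLARE_EVENT_CLASS / DEFINE_EVENT split above, purely for illustration: one shared formatter (the "class") and several named events that reuse it. The real kernel machinery additionally generates the trace_*() call sites and the tracefs plumbing.

#include <stdio.h>

static void vtu_violation_class(const char *event, const char *dev,
				int spid, unsigned short vid)
{
	printf("%s: dev %s spid %d vid %u\n", event, dev, spid, vid);
}

#define DEFINE_VTU_EVENT(name)						\
static void trace_##name(const char *dev, int spid, unsigned short vid) \
{									\
	vtu_violation_class(#name, dev, spid, vid);			\
}

DEFINE_VTU_EVENT(mv88e6xxx_vtu_member_violation)
DEFINE_VTU_EVENT(mv88e6xxx_vtu_miss_violation)

int main(void)
{
	trace_mv88e6xxx_vtu_miss_violation("mdio_bus", 1, 100);
	return 0;
}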
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 9948544ba1c4..081e7a88ea02 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -1,4 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX_DSA_LIB
+ tristate
+ help
+ This is an umbrella module for all network switches that are
+ register-compatible with Ocelot and that perform I/O to their host
+ CPU through an NPI (Node Processor Interface) Ethernet port.
+ Its name comes from the first hardware chip to make use of it
+ (VSC9959), code named Felix.
+
+config NET_DSA_MSCC_OCELOT_EXT
+ tristate "Ocelot External Ethernet switch support"
+ depends on NET_DSA && SPI
+ depends on NET_VENDOR_MICROSEMI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
+ select MFD_OCELOT
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC7511, VSC7512, VSC7513 and VSC7514 chips
+ when controlled through SPI.
+
+ The Ocelot switch family is a set of multi-port networking chips. All
+ of these chips have the ability to be controlled externally through
+ SPI or PCIe interfaces.
+
+ Say "Y" here to enable external control to these chips.
+
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
@@ -6,7 +36,9 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
@@ -21,7 +53,9 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select PCS_LYNX
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index f6dd131e7491..ead868a293e3 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX_DSA_LIB) += mscc_felix_dsa_lib.o
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_OCELOT_EXT) += mscc_ocelot_ext.o
obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
-mscc_felix-objs := \
- felix.o \
- felix_vsc9959.o
-
-mscc_seville-objs := \
- felix.o \
- seville_vsc9953.o
+mscc_felix_dsa_lib-objs := felix.o
+mscc_felix-objs := felix_vsc9959.o
+mscc_ocelot_ext-objs := ocelot_ext.o
+mscc_seville-objs := seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 83808e7dbdda..9e5ede932b42 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -21,37 +21,104 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
-#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int key_length, upstream, err;
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
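felix_classify_db() multiplexes three outcomes through a single pointer return. A self-contained sketch of the ERR_PTR encoding it relies on (simplified from the kernel's include/linux/err.h):

/* Errno values in [-MAX_ERRNO, -1] are cast into the top pages of
 * the address space, so one pointer can carry either a valid object
 * (here, the bridge net_device), NULL (standalone or LAG port: use
 * VID 0), or an error code.
 */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}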
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
+static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
+ struct net_device *conduit)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(conduit)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, conduit);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = conduit->dsa_ptr;
+ return cpu_dp->index;
+}
+
+/**
+ * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
+ * vlan_filtering change
+ * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed
+ * @vlan_filtering: Current bridge VLAN filtering setting
+ *
+ * Source port identification for tag_8021q is done using VCAP ES0 rules on the
+ * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as
+ * either:
+ * - push_inner_tag=0: the inner tag is never pushed into the frame
+ * (and we lose info about the classified VLAN). This is
+ * good when the classified VLAN is a discardable quantity
+ * for the software RX path: it is either set to
+ * OCELOT_STANDALONE_PVID, or to
+ * ocelot_vlan_unaware_pvid(bridge).
+ * - push_inner_tag=1: the inner tag is always pushed. This is good when the
+ * classified VLAN is not a discardable quantity (the port
+ * is under a VLAN-aware bridge, and software needs to
+ * continue processing the packet in the same VLAN as the
+ * hardware).
+ * The point is that what is good for a VLAN-unaware port is not good for a
+ * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in
+ * sync with the VLAN filtering state of the port.
+ */
+static void
+felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
+ bool vlan_filtering)
+{
+ if (vlan_filtering)
+ outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG;
+ else
+ outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG;
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid,
+ bool vlan_filtering)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int key_length, err;
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
- upstream = dsa_upstream_port(ds, port);
outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
GFP_KERNEL);
if (!outer_tagging_rule)
return -ENOMEM;
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -64,6 +131,14 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+ outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
+ /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also
+ * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to
+ * the classified VID, which we need to see in the DSA tagger's receive
+ * path. Note: the inner tag is only visible in the packet when pushed
+ * (push_inner_tag == OCELOT_ES0_TAG).
+ */
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
if (err)
@@ -72,20 +147,36 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
{
- struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int upstream, err;
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
+ u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ unsigned long cpu_ports = dsa_cpu_ports(ds);
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
@@ -97,14 +188,14 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return -ENOMEM;
}
- upstream = dsa_upstream_port(ds, port);
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
- untagging_rule->ingress_port_mask = BIT(upstream);
+ untagging_rule->ingress_port_mask = cpu_ports;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = cookie;
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -121,11 +212,13 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = BIT(upstream);
+ redirect_rule->ingress_port_mask = cpu_ports;
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = cookie;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -143,335 +236,213 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- port, false);
+ cookie, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
return err;
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- port, false);
+ cookie, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
-static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp;
+ int err;
+
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_port_is_user(dp))
+ return 0;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
+ if (err)
+ return err;
+ }
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
+ if (err)
+ goto add_tx_failed;
return 0;
+
+add_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
}
-/* Alternatively to using the NPI functionality, that same hardware MAC
- * connected internally to the enetc or fman DSA master can be configured to
- * use the software-defined tag_8021q frame format. As far as the hardware is
- * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
- * module are now disconnected from it, but can still be accessed through
- * register-based MMIO.
- */
-static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp;
+ int err;
- /* Overwrite PGID_CPU with the non-tagging port */
- ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
+ if (!dsa_port_is_user(dp))
+ return 0;
- ocelot_apply_bridge_fwd_mask(ocelot);
-}
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
-static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
-{
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
+ if (err)
+ goto del_tx_failed;
- /* Restore PGID_CPU */
- ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
- PGID_CPU);
+ return 0;
- ocelot_apply_bridge_fwd_mask(ocelot);
+del_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
+
+ return err;
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
- */
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ unsigned long cookie;
+ int err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
- }
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port,
+ cpu_dp->index);
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
- }
- }
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
- if (cpu < 0)
- return -EINVAL;
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
-
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
- }
-
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule);
+ if (err)
+ return err;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
-
return 0;
}
-static int felix_teardown_mmio_filtering(struct felix *felix)
+static int felix_trap_get_cpu_port(struct dsa_switch *ds,
+ const struct ocelot_vcap_filter *trap)
{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
-
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
-
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
+ struct dsa_port *dp;
+ int first_port;
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
+ if (WARN_ON(!trap->ingress_port_mask))
+ return -1;
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
+ first_port = __ffs(trap->ingress_port_mask);
+ dp = dsa_to_port(ds, first_port);
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
+ return dp->cpu_dp->index;
}
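
Annotation (not part of the patch): __ffs() returns the index of the least significant set bit, so when a trap can match on several user ports, the CPU port assigned to the lowest-numbered of them is chosen as the destination. A minimal sketch with illustrative values:

	/* illustrative only: ports 1 and 2 can match this trap */
	unsigned long ingress_port_mask = BIT(1) | BIT(2);	/* 0b0110 */
	int first_port = __ffs(ingress_port_mask);		/* == 1 */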
-static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
+ */
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
-
- felix_8021q_cpu_port_init(ocelot, cpu);
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool cpu_copy_ena;
+ int err;
- /* This overwrites ocelot_init():
- * Do not forward BPDU frames to the CPU port module,
- * for 2 reasons:
- * - When these packets are injected from the tag_8021q
- * CPU port, we want them to go out, not loop back
- * into the system.
- * - STP traffic ingressing on a user port should go to
- * the tag_8021q CPU port, not to the hardware CPU
- * port module.
- */
- ocelot_write_gix(ocelot,
- ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
- }
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
*/
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
- err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
- if (err)
- return err;
-
- err = felix_setup_mmio_filtering(felix);
- if (err)
- goto out_tag_8021q_unregister;
-
- return 0;
-
-out_tag_8021q_unregister:
- dsa_tag_8021q_unregister(ds);
- return err;
-}
-
-static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
-{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
- err = felix_teardown_mmio_filtering(felix);
- if (err)
- dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
- err);
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
- dsa_tag_8021q_unregister(ds);
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
+ }
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
continue;
- /* Restore the logic from ocelot_init:
- * do not forward BPDU frames to the front ports.
- */
- ocelot_write_gix(ocelot,
- ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
- ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
+
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- felix_8021q_cpu_port_deinit(ocelot, cpu);
+ return 0;
}
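
Annotation (summarizing the loop above, not from the patch text): the two trapping destination layouts programmed here are:

	/* tag_8021q: mask_mode = OCELOT_MASK_MODE_REDIRECT,
	 *            port_mask = BIT(cpu port), cpu_copy_ena = trap->take_ts
	 *            (copy to the CPU port module only when a timestamp is
	 *            needed);
	 * NPI:       mask_mode = OCELOT_MASK_MODE_PERMIT_DENY,
	 *            port_mask = 0, cpu_copy_ena = true (CPU port module
	 *            only, reached over the NPI port).
	 */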
/* The CPU port module is connected to the Node Processor Interface (NPI). This
* is the mode through which frames can be injected from and extracted to an
* external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
* running Linux, and this forms a DSA setup together with the enetc or fman
- * DSA master.
+ * DSA conduit.
*/
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
@@ -510,103 +481,322 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}
-static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
+static int felix_tag_npi_setup(struct dsa_switch *ds)
{
+ struct dsa_port *dp, *first_cpu_dp = NULL;
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
- felix_npi_port_init(ocelot, cpu);
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
+ dev_err(ds->dev, "Multiple NPI ports not supported\n");
+ return -EINVAL;
+ }
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ first_cpu_dp = dp->cpu_dp;
+ }
+
+ if (!first_cpu_dp)
+ return -EINVAL;
+
+ felix_npi_port_init(ocelot, first_cpu_dp->index);
return 0;
}
-static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
+static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- felix_npi_port_deinit(ocelot, cpu);
+ felix_npi_port_deinit(ocelot, ocelot->npi);
}
-static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
+static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
{
- int err;
+ struct ocelot *ocelot = ds->priv;
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- err = felix_setup_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- err = felix_setup_tag_8021q(ds, cpu);
- break;
- default:
- err = -EPROTONOSUPPORT;
+ return BIT(ocelot->num_phys_ports);
+}
+
+static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(conduit)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA conduit only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
}
- return err;
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *user = other_dp->user;
+
+ if (other_dp != dp && (user->flags & IFF_UP) &&
+ dsa_port_to_conduit(other_dp) != conduit) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old conduit still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit));
+
+ return 0;
}
-static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
+static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
+ .setup = felix_tag_npi_setup,
+ .teardown = felix_tag_npi_teardown,
+ .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_conduit = felix_tag_npi_change_conduit,
+};
+
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA conduit can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- felix_teardown_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- felix_teardown_tag_8021q(ds, cpu);
- break;
- default:
- break;
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp;
+ int err;
+
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
+ if (err)
+ return err;
+
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
+ dp->cpu_dp->index);
+
+ dsa_switch_for_each_available_port(dp, ds)
+ /* This overwrites ocelot_init():
+ * Do not forward BPDU frames to the CPU port module,
+ * for 2 reasons:
+ * - When these packets are injected from the tag_8021q
+ * CPU port, we want them to go out, not loop back
+ * into the system.
+ * - STP traffic ingressing on a user port should go to
+ * the tag_8021q CPU port, not to the hardware CPU
+ * port module.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
+
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_lock_xtr_grp_bh(ocelot, 0);
+ ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp_bh(ocelot, 0);
+
+ /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info
+ * about whether the received packets were VLAN-tagged on the wire,
+ * since they are always tagged on egress towards the CPU port.
+ *
+ * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges,
+ * we must work around the fallout by untagging in software to make
+ * untagged reception work more or less as expected.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
+ return 0;
+}
+
+static void felix_tag_8021q_teardown(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_available_port(dp, ds)
+ /* Restore the logic from ocelot_init:
+ * do not forward BPDU frames to the front ports.
+ */
+ ocelot_write_gix(ocelot,
+ ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_tag_8021q_unregister(ds);
+
+ ds->untag_vlan_aware_bridge_pvid = false;
+}
+
+static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
+{
+ return dsa_cpu_ports(ds);
+}
+
+static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_conduit(ds, conduit);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
+static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
+ .setup = felix_tag_8021q_setup,
+ .teardown = felix_tag_8021q_teardown,
+ .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_conduit = felix_tag_8021q_change_conduit,
+};
+
+static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
+ bool uc, bool mc, bool bc)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long val;
+
+ val = uc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
+
+ val = mc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
+
+ val = bc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
+}
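
Annotation: felix_set_host_flood() relies on ocelot_rmw_rix() being a read-modify-write accessor that touches only the bits selected by the mask, so the flood settings of the front ports are preserved. A minimal sketch of the assumed semantics:

	/* illustrative equivalent of each PGID update above */
	static inline u32 pgid_rmw(u32 old, u32 val, u32 mask)
	{
		return (old & ~mask) | (val & mask);
	}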
+
+static void
+felix_migrate_host_flood(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (old_proto_ops) {
+ mask = old_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, false, false, false);
}
+
+ mask = proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
+
+static int felix_migrate_mdbs(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long from, to;
+
+ if (!old_proto_ops)
+ return 0;
+
+ from = old_proto_ops->get_host_fwd_mask(ds);
+ to = proto_ops->get_host_fwd_mask(ds);
+
+ return ocelot_migrate_mdbs(ocelot, from, to);
+}
+
+/* Configure the shared hardware resources for a transition between
+ * @old_proto_ops and @proto_ops.
+ * Manual migration is needed because as far as DSA is concerned, no change of
+ * the CPU port is taking place here, just of the tagging protocol.
+ */
+static int
+felix_tag_proto_setup_shared(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
+{
+ bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
+ int err;
+
+ err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
+ if (err)
+ return err;
+
+ felix_update_trapping_destinations(ds, using_tag_8021q);
+
+ felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
+
+ return 0;
}
/* This always leaves the switch in a consistent state, because although the
* tag_8021q setup can fail, the NPI setup can't. So either the change is made,
* or the restoration is guaranteed to work.
*/
-static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+static int felix_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
+ const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- enum dsa_tag_protocol old_proto = felix->tag_proto;
int err;
- if (proto != DSA_TAG_PROTO_SEVILLE &&
- proto != DSA_TAG_PROTO_OCELOT &&
- proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ proto_ops = &felix_tag_npi_proto_ops;
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ proto_ops = &felix_tag_8021q_proto_ops;
+ break;
+ default:
return -EPROTONOSUPPORT;
+ }
- felix_del_tag_protocol(ds, cpu, old_proto);
+ old_proto_ops = felix->tag_proto_ops;
- err = felix_set_tag_protocol(ds, cpu, proto);
- if (err) {
- felix_set_tag_protocol(ds, cpu, old_proto);
- return err;
- }
+ if (proto_ops == old_proto_ops)
+ return 0;
+
+ err = proto_ops->setup(ds);
+ if (err)
+ goto setup_failed;
+ err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
+ if (err)
+ goto setup_shared_failed;
+
+ if (old_proto_ops)
+ old_proto_ops->teardown(ds);
+
+ felix->tag_proto_ops = proto_ops;
felix->tag_proto = proto;
return 0;
+
+setup_shared_failed:
+ proto_ops->teardown(ds);
+setup_failed:
+ return err;
}
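
Annotation: in mainline DSA, this callback is reached through the conduit interface's "dsa/tagging" sysfs attribute, which only accepts a change while the interfaces are down. An illustrative usage sketch ("eth0" is a placeholder conduit name):

	/*   ip link set eth0 down
	 *   echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
	 *   ip link set eth0 up
	 */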
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
@@ -619,6 +809,38 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
return felix->tag_proto;
}
+static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
+ bool uc, bool mc)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (uc)
+ felix->host_flood_uc_mask |= BIT(port);
+ else
+ felix->host_flood_uc_mask &= ~BIT(port);
+
+ if (mc)
+ felix->host_flood_mc_mask |= BIT(port);
+ else
+ felix->host_flood_mc_mask &= ~BIT(port);
+
+ mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
+
+static int felix_port_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -629,6 +851,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
+static void felix_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_mact_flush(ocelot, port);
+ if (err)
+ dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
+ port, ERR_PTR(err));
+}
+
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -638,35 +871,111 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -692,46 +1001,63 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
ocelot_port_bridge_flags(ocelot, port, val);
return 0;
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, br);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_leave(ocelot, port, br);
+ ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ return felix_port_change_conduit(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_conduit(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -772,8 +1098,23 @@ static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ bool using_tag_8021q;
+ struct felix *felix;
+ int err;
+
+ err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ if (err)
+ return err;
+
+ felix = ocelot_to_felix(ocelot);
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+ if (using_tag_8021q) {
+ err = felix_update_tag_8021q_rx_rules(ds, port, enabled);
+ if (err)
+ return err;
+ }
- return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ return 0;
}
static int felix_vlan_add(struct dsa_switch *ds, int port,
@@ -801,57 +1142,132 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
-static void felix_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- if (felix->info->phylink_validate)
- felix->info->phylink_validate(ocelot, port, supported, state);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
+ if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+ config->supported_interfaces);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
+static void felix_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
const struct phylink_link_state *state)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
+
+ if (felix->info->phylink_mac_config)
+ felix->info->phylink_mac_config(ocelot, port, mode, state);
+}
- if (felix->pcs[port])
- phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
+static struct phylink_pcs *
+felix_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
+
+ if (felix->pcs && felix->pcs[port])
+ pcs = felix->pcs[port];
+
+ return pcs;
}
-static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_down(struct phylink_config *config,
unsigned int link_an_mode,
phy_interface_t interface)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
}
-static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *conduit = dsa_port_to_conduit(dp);
+
+ if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple conduits are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL);
+}
+
+static void felix_port_disable(struct dsa_switch *ds, int port)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (!dsa_port_is_user(dp))
+ return;
+
+ if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return;
+
+ dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev);
+}
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -873,6 +1289,63 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
+static void felix_get_ts_stats(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_ts_stats(ocelot, port, ts_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -896,22 +1369,40 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
static int felix_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_get_ts_info(ocelot, port, info);
}
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII,
+ [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
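
Annotation: a worked example of the table lookup above, assuming a hypothetical port wired for QSGMII only:

	u32 modes = OCELOT_PORT_MODE_QSGMII;	/* hypothetical wiring */
	/* felix_phy_match_table[PHY_INTERFACE_MODE_QSGMII] & modes != 0,
	 * so "qsgmii" validates;
	 * felix_phy_match_table[PHY_INTERFACE_MODE_2500BASEX] & modes == 0,
	 * so "2500base-x" is rejected with -EOPNOTSUPP.
	 */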
+
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
- struct ocelot *ocelot = &felix->ocelot;
struct device *dev = felix->ocelot.dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
@@ -920,7 +1411,6 @@ static int felix_parse_ports_node(struct felix *felix,
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -930,16 +1420,19 @@ static int felix_parse_ports_node(struct felix *felix,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
- of_node_put(child);
return -ENODEV;
}
- err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
- dev_err(dev, "Unsupported PHY mode %s on port %d\n",
- phy_modes(phy_mode), port);
- of_node_put(child);
- return err;
+ dev_info(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
+
+ /* Leave port_phy_modes[port] = 0, which is also
+ * PHY_INTERFACE_MODE_NA. This will perform a
+ * best-effort to bring up as many ports as possible.
+ */
+ continue;
}
port_phy_modes[port] = phy_mode;
@@ -971,11 +1464,62 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ /* In an MFD configuration, regmaps are registered directly to the
+ * parent device before the child devices are probed, so there is no
+ * need to initialize a new one.
+ */
+ if (!felix->info->resources)
+ return dev_get_regmap(ocelot->dev->parent, resource_name);
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
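
Annotation: the naming convention assumed by these helpers is that per-target regmaps are requested under the names in info->resource_names[], and per-port register blocks under generated "port%d" strings:

	/* felix_request_regmap(felix, SYS)     -> resource_names[SYS]
	 * felix_request_port_regmap(felix, 3)  -> "port3"
	 */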
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -985,10 +1529,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
ocelot->map = felix->info->map;
- ocelot->stats_layout = felix->info->stats_layout;
- ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
@@ -1006,20 +1552,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1036,7 +1573,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1048,16 +1584,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1065,6 +1596,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
+ ocelot_port->index = port;
ocelot->ports[port] = ocelot_port;
}
@@ -1119,6 +1651,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
int port = xmit_work->dp->index;
int retries = 10;
+ ocelot_lock_inj_grp(ocelot, 0);
+
do {
if (ocelot_can_inject(ocelot, 0))
break;
@@ -1127,6 +1661,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
} while (--retries);
if (!retries) {
+ ocelot_unlock_inj_grp(ocelot, 0);
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
@@ -1136,59 +1671,44 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ ocelot_unlock_inj_grp(ocelot, 0);
+
consume_skb(skb);
kfree(xmit_work);
}
-static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+static int felix_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- struct felix_port *felix_port;
+ struct ocelot_8021q_tagger_data *tagger_data;
- if (!dsa_port_is_user(dp))
+ switch (proto) {
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ tagger_data = ocelot_8021q_tagger_data(ds);
+ tagger_data->xmit_work_fn = felix_port_deferred_xmit;
return 0;
-
- felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
- if (!felix_port)
- return -ENOMEM;
-
- felix_port->xmit_worker = felix->xmit_worker;
- felix_port->xmit_work_fn = felix_port_deferred_xmit;
-
- dp->priv = felix_port;
-
- return 0;
-}
-
-static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct felix_port *felix_port = dp->priv;
-
- if (!felix_port)
- return;
-
- dp->priv = NULL;
- kfree(felix_port);
+ case DSA_TAG_PROTO_OCELOT:
+ case DSA_TAG_PROTO_SEVILLE:
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
}
-/* Hardware initialization done here so that we can allocate structures with
- * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
- * us to allocate structures twice (leak memory) and map PCI memory twice
- * (which will not work).
- */
static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
return err;
+ if (ocelot->targets[HSIO])
+ ocelot_pll5_init(ocelot);
+
err = ocelot_init(ocelot);
if (err)
goto out_mdiobus_free;
@@ -1202,28 +1722,24 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
- if (IS_ERR(felix->xmit_worker)) {
- err = PTR_ERR(felix->xmit_worker);
- goto out_deinit_timestamp;
- }
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
- ocelot_init_port(ocelot, port);
+ if (felix->info->configure_serdes)
+ felix->info->configure_serdes(ocelot, dp->index,
+ dp->dn);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
+ felix_port_qos_map_init(ocelot, dp->index);
+ }
- err = felix_port_setup_tagger_data(ds, port);
+ if (felix->info->request_irq) {
+ err = felix->info->request_irq(ocelot);
if (err) {
- dev_err(ds->dev,
- "port %d failed to set up tagger data: %pe\n",
- port, ERR_PTR(err));
+ dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(err));
goto out_deinit_ports;
}
}
@@ -1232,34 +1748,22 @@ static int felix_setup(struct dsa_switch *ds)
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- /* The initial tag protocol is NPI which always returns 0, so
- * there's no real point in checking for errors.
- */
- felix_set_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
+ /* The initial tag protocol is NPI, which won't fail during initial
+ * setup, so there's no real point in checking for errors.
+ */
+ felix_change_tag_protocol(ds, felix->tag_proto);
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
-
- kthread_destroy_worker(felix->xmit_worker);
-
-out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1274,25 +1778,15 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- felix_del_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
-
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
+ rtnl_lock();
+ if (felix->tag_proto_ops)
+ felix->tag_proto_ops->teardown(ds);
+ rtnl_unlock();
- kthread_destroy_worker(felix->xmit_worker);
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1303,25 +1797,37 @@ static void felix_teardown(struct dsa_switch *ds)
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_hwstamp_get(ocelot, port, ifr);
+ ocelot_hwstamp_get(ocelot, port, config);
+
+ return 0;
}
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_hwstamp_set(ocelot, port, config, extack);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1329,8 +1835,7 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
+ ocelot_lock_xtr_grp(ocelot, grp);
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
@@ -1361,8 +1866,14 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
+
+ ocelot_unlock_xtr_grp(ocelot, grp);
return true;
}
@@ -1370,19 +1881,31 @@ out:
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type)
{
- u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
+ u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo;
struct skb_shared_hwtstamps *shhwtstamps;
struct ocelot *ocelot = ds->priv;
- u32 tstamp_lo, tstamp_hi;
struct timespec64 ts;
- u64 tstamp, val;
+ u32 tstamp_hi;
+ u64 tstamp;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_L2:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2))
+ return false;
+ break;
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4))
+ return false;
+ break;
+ }
/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
* for RX timestamping. Then free it, and poll for its copy through
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1390,9 +1913,6 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- ocelot_xfh_get_rew_val(extraction, &val);
- tstamp_lo = (u32)val;
-
tstamp_hi = tstamp >> 32;
if ((tstamp & 0xffffffff) < tstamp_lo)
tstamp_hi--;
@@ -1428,9 +1948,17 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
ocelot_port_set_maxlen(ocelot, port, new_mtu);
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update)
+ ocelot->ops->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
return 0;
}
@@ -1445,8 +1973,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1484,6 +2021,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1633,23 +2188,152 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
-const struct dsa_switch_ops felix_switch_ops = {
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_get_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_mm(ocelot, port, state);
+}
+
+static int felix_set_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_mm(ocelot, port, cfg, extack);
+}
+
+static void felix_get_mm_stats(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_mm_stats(ocelot, port, stats);
+}
+
+/* Depending on port type, we may be able to support the offload later (with
+ * the "ocelot"/"seville" tagging protocols), or never.
+ * If we return 0, the dp->hsr_dev reference is kept for later; if we return
+ * -EOPNOTSUPP, it is cleared (which helps to not bother
+ * dsa_port_simple_hsr_leave() with an offload that didn't pass validation).
+ */
+static int felix_port_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) {
+ int err;
+
+ err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
+ if (err)
+ return err;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offloading not supported with \"ocelot-8021q\"");
+ return 0;
+ }
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_join(ds, port, hsr, extack);
+}
+
+static int felix_port_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q)
+ return 0;
+
+ if (!(dsa_to_port(ds, port)->user->flags & IFF_UP))
+ return 0;
+
+ return dsa_port_simple_hsr_leave(ds, port, hsr);
+}
+
+static const struct phylink_mac_ops felix_phylink_mac_ops = {
+ .mac_select_pcs = felix_phylink_mac_select_pcs,
+ .mac_config = felix_phylink_mac_config,
+ .mac_link_down = felix_phylink_mac_link_down,
+ .mac_link_up = felix_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
+ .connect_tag_protocol = felix_connect_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_mm = felix_get_mm,
+ .set_mm = felix_set_mm,
+ .get_mm_stats = felix_get_mm_stats,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_ts_stats = felix_get_ts_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
- .phylink_validate = felix_phylink_validate,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
+ .phylink_get_caps = felix_phylink_get_caps,
+ .port_enable = felix_port_enable,
+ .port_disable = felix_port_disable,
+ .port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1671,6 +2355,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1691,8 +2377,64 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
+ .port_set_host_flood = felix_port_set_host_flood,
+ .port_change_conduit = felix_port_change_conduit,
+ .port_hsr_join = felix_port_hsr_join,
+ .port_hsr_leave = felix_port_hsr_leave,
};
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+ ocelot->num_flooding_pgids = num_flooding_pgids;
+ ocelot->ptp = ptp;
+ ocelot->mm_supported = mm_supported;
+
+ felix->info = info;
+ felix->switch_base = switch_base;
+ felix->ds = ds;
+ felix->tag_proto = init_tag_proto;
+
+ ds->dev = dev;
+ ds->num_ports = info->num_ports;
+ ds->num_tx_queues = OCELOT_NUM_TC;
+ ds->ops = &felix_switch_ops;
+ ds->phylink_mac_ops = &felix_phylink_mac_ops;
+ ds->priv = ocelot;
+
+ err = dsa_register_switch(ds);
+ if (err)
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(felix_register_switch);
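
Annotation: a minimal sketch of a platform shim consuming this helper. The driver, device and "myswitch_info" names are hypothetical; only the felix_register_switch() signature is taken from the patch:

	static int myswitch_probe(struct platform_device *pdev)
	{
		/* switch_base 0, 1 flooding PGID, PTP on, no MAC Merge */
		return felix_register_switch(&pdev->dev, 0, 1, true, false,
					     DSA_TAG_PROTO_OCELOT,
					     &myswitch_info);
	}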
+
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
struct felix *felix = ocelot_to_felix(ocelot);
@@ -1701,8 +2443,9 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
if (!dsa_is_user_port(ds, port))
return NULL;
- return dsa_to_port(ds, port)->slave;
+ return dsa_to_port(ds, port)->user;
}
+EXPORT_SYMBOL_GPL(felix_port_to_netdev);
int felix_netdev_to_port(struct net_device *dev)
{
@@ -1714,3 +2457,7 @@ int felix_netdev_to_port(struct net_device *dev)
return dp->index;
}
+EXPORT_SYMBOL_GPL(felix_netdev_to_port);
+
+MODULE_DESCRIPTION("Felix DSA library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be3e42e135c0..a657b190c5d7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,23 +7,39 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_NONE 0
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4) /* compatibility */
+#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+#define OCELOT_PORT_MODE_10G_QXGMII BIT(6)
+
+struct device_node;
+
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
+ const u32 *port_modes;
int num_mact_rows;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
int num_ports;
- int num_tx_queues;
struct vcap_props *vcap;
- int switch_pci_bar;
- int imdio_pci_bar;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
+ unsigned long quirks;
/* Some Ocelot switches are integrated into the SoC without the
* extraction IRQ line connected to the ARM GIC. By enabling this
@@ -39,18 +55,33 @@ struct felix_info {
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*phylink_validate)(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
+ void (*phylink_mac_config)(struct ocelot *ocelot, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ int (*configure_serdes)(struct ocelot *ocelot, int port,
+ struct device_node *portnp);
+ int (*request_irq)(struct ocelot *ocelot);
};
-extern const struct dsa_switch_ops felix_switch_ops;
+/* Methods for initializing the hardware resources specific to a tagging
+ * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
+ * for "ocelot-8021q").
+ * It is important that the resources configured here do not have side effects
+ * for the other tagging protocols. If they do, their configuration needs to
+ * go to felix_tag_proto_setup_shared().
+ */
+struct felix_tag_proto_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+ unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_conduit)(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack);
+};
/* DSA glue / front-end for struct ocelot */
struct felix {
@@ -58,13 +89,20 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct lynx_pcs **pcs;
+ struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
+ const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
+ unsigned long host_flood_uc_mask;
+ unsigned long host_flood_mc_mask;
};
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
int felix_netdev_to_port(struct net_device *dev);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 45c5ec7a83ea..8cf4c8986587 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -5,18 +5,46 @@
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
+#include <linux/of.h>
#include <linux/pci.h>
+#include <linux/time.h>
#include "felix.h"
+#define VSC9959_NUM_PORTS 6
+
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 35
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
+#define VSC9959_SWITCH_PCI_BAR 4
+#define VSC9959_IMDIO_PCI_BAR 0
+
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII | \
+ OCELOT_PORT_MODE_10G_QXGMII)
+
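+/* Ports 4 and 5 are the internal to-CPU ports; they support only
+ * OCELOT_PORT_MODE_INTERNAL, while ports 0-3 connect to SerDes lanes.
+ */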
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
@@ -250,27 +278,139 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_RX_ASSEMBLY_ERRS, 0x0000b0),
+ REG(SYS_COUNT_RX_SMD_ERRS, 0x0000b4),
+ REG(SYS_COUNT_RX_ASSEMBLY_OK, 0x0000b8),
+ REG(SYS_COUNT_RX_MERGE_FRAGMENTS, 0x0000bc),
+ REG(SYS_COUNT_RX_PMAC_OCTETS, 0x0000c0),
+ REG(SYS_COUNT_RX_PMAC_UNICAST, 0x0000c4),
+ REG(SYS_COUNT_RX_PMAC_MULTICAST, 0x0000c8),
+ REG(SYS_COUNT_RX_PMAC_BROADCAST, 0x0000cc),
+ REG(SYS_COUNT_RX_PMAC_SHORTS, 0x0000d0),
+ REG(SYS_COUNT_RX_PMAC_FRAGMENTS, 0x0000d4),
+ REG(SYS_COUNT_RX_PMAC_JABBERS, 0x0000d8),
+ REG(SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS, 0x0000dc),
+ REG(SYS_COUNT_RX_PMAC_SYM_ERRS, 0x0000e0),
+ REG(SYS_COUNT_RX_PMAC_64, 0x0000e4),
+ REG(SYS_COUNT_RX_PMAC_65_127, 0x0000e8),
+ REG(SYS_COUNT_RX_PMAC_128_255, 0x0000ec),
+ REG(SYS_COUNT_RX_PMAC_256_511, 0x0000f0),
+ REG(SYS_COUNT_RX_PMAC_512_1023, 0x0000f4),
+ REG(SYS_COUNT_RX_PMAC_1024_1526, 0x0000f8),
+ REG(SYS_COUNT_RX_PMAC_1527_MAX, 0x0000fc),
+ REG(SYS_COUNT_RX_PMAC_PAUSE, 0x000100),
+ REG(SYS_COUNT_RX_PMAC_CONTROL, 0x000104),
+ REG(SYS_COUNT_RX_PMAC_LONGS, 0x000108),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_TX_MM_HOLD, 0x00027c),
+ REG(SYS_COUNT_TX_MERGE_FRAGMENTS, 0x000280),
+ REG(SYS_COUNT_TX_PMAC_OCTETS, 0x000284),
+ REG(SYS_COUNT_TX_PMAC_UNICAST, 0x000288),
+ REG(SYS_COUNT_TX_PMAC_MULTICAST, 0x00028c),
+ REG(SYS_COUNT_TX_PMAC_BROADCAST, 0x000290),
+ REG(SYS_COUNT_TX_PMAC_PAUSE, 0x000294),
+ REG(SYS_COUNT_TX_PMAC_64, 0x000298),
+ REG(SYS_COUNT_TX_PMAC_65_127, 0x00029c),
+ REG(SYS_COUNT_TX_PMAC_128_255, 0x0002a0),
+ REG(SYS_COUNT_TX_PMAC_256_511, 0x0002a4),
+ REG(SYS_COUNT_TX_PMAC_512_1023, 0x0002a8),
+ REG(SYS_COUNT_TX_PMAC_1024_1526, 0x0002ac),
+ REG(SYS_COUNT_TX_PMAC_1527_MAX, 0x0002b0),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -292,7 +432,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -340,6 +479,9 @@ static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
+ REG(DEV_MM_ENABLE_CONFIG, 0x48),
+ REG(DEV_MM_VERIF_CONFIG, 0x4C),
+ REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
@@ -378,100 +520,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
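+
+/* Worked example: each DEFINE_RES_MEM_NAMED() above takes (offset, size,
+ * name) relative to the switch PCI BAR, so "ana" at (0x0280000, 0x0010000)
+ * spans 0x0280000..0x028ffff.
+ */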
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
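+
+/* DEFINE_RES_MEM_NAMED() takes (start, size, name), so the window above
+ * covers the 16 bytes at 0x8030..0x803f within the IMDIO PCI BAR.
+ */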
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -523,101 +608,6 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
-};
-
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 3},
[VCAP_ES0_IGR_PORT] = { 3, 3},
@@ -934,62 +924,6 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 4 && port != 5)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* Not supported on internal to-CPU ports */
- if (port == 4 || port == 5)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -1020,20 +954,13 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
*maxuse = val & GENMASK(11, 0);
}
-static const struct ocelot_ops vsc9959_ops = {
- .reset = vsc9959_reset,
- .wm_enc = vsc9959_wm_enc,
- .wm_dec = vsc9959_wm_dec,
- .wm_stat = vsc9959_wm_stat,
- .port_to_netdev = felix_port_to_netdev,
- .netdev_to_port = felix_netdev_to_port,
-};
-
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1042,17 +969,18 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct lynx_pcs *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1064,13 +992,15 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
bus->name = "VSC9959 internal MDIO bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -1084,6 +1014,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1091,8 +1022,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
+ struct phylink_pcs *phylink_pcs;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1100,17 +1030,11 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, port);
- if (IS_ERR(pcs))
- continue;
-
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, port);
+ if (IS_ERR(phylink_pcs))
continue;
- }
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1124,20 +1048,308 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
-
- if (!pcs)
- continue;
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ if (phylink_pcs)
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
+}
+
+/* The switch considers any frame (regardless of size) as eligible
+ * for transmission if the traffic class gate is open for at least
+ * VSC9959_TAS_MIN_GATE_LEN_NS.
+ *
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot during which egress scheduling is blocked, but we still need to keep
+ * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted,
+ * otherwise the port's traffic class will hang.
+ *
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
+ return 0;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
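+
+/* Worked example: a 1000 ns gate interval leaves (1000 - 35) * PSEC_PER_NSEC
+ * = 965000 ps of guard band budget once the minimum transmit window is
+ * reserved.
+ */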
+
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice to determine the length
+ * of consecutively open gates for a traffic class, including
+ * open gates that wrap around. We are just interested in the
+ * minimum window size, and this doesn't change what the
+ * minimum is (if the gate never closes, min_gate_len will
+ * remain U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
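+
+/* Worked example: for a two-entry schedule { tc0 open for 3000 ns, tc1 open
+ * for 5000 ns }, this computes min_gate_len[0] = 3000 and min_gate_len[1] =
+ * 5000, while every other tc ends at 0 because its gate never opens. A gate
+ * open in every entry would remain at U64_MAX.
+ */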
+
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access for each traffic class in
+ * helper functions, to simplify callers.
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
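+
+/* Worked example: a requested max_sdu of 1500 becomes 1500 + 14 (Ethernet
+ * header) + 8 (two VLAN tags) + 4 (FCS) = 1526 octets as counted by hardware.
+ */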
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU-sized frame, update QSYS_QMAXSDU_CFG to enable oversized-frame
+ * dropping, so that such frames don't hang the port, since they can never be
+ * sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ struct tc_taprio_qopt_offload *taprio;
+ u64 min_gate_len[OCELOT_NUM_TC];
+ u32 val, maxlen, add_frag_size;
+ u64 needed_min_frag_time_ps;
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ taprio = ocelot_port->taprio;
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+ /* MAXLEN_CFG accounts automatically for VLAN. We need to include it
+ * manually in the bit time calculation, plus the preamble and SFD.
+ */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
+ */
+ needed_bit_time_ps = (u64)(maxlen + 24) * picos_per_byte;
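+	/* Worked example: at 1000 Mbps, picos_per_byte = (1000000 * 8) / 1000
+	 * = 8000 ps, so a 1518 octet MAXLEN (1526 octets with two VLAN tags)
+	 * needs (1526 + 24) * 8000 = 12400000 ps, i.e. 12.4 us per frame.
+	 */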
+
+	/* Preemptible TCs don't need to pass a full MTU, since the port will
+	 * automatically emit a HOLD request when a preemptible TC gate closes.
+ */
+ val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
+ add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
+ needed_min_frag_time_ps = picos_per_byte *
+ (u64)(24 + 2 * ethtool_mm_frag_size_add_to_min(add_frag_size));
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps, %llu ps for mPackets at speed %d\n",
+ port, maxlen, needed_bit_time_ps, needed_min_frag_time_ps,
+ speed);
+
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
+ u32 max_sdu;
+
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if ((mm->active_preemptible_tcs & BIT(tc)) ?
+ remaining_gate_len_ps > needed_min_frag_time_ps :
+ remaining_gate_len_ps > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = requested_max_sdu;
+			dev_dbg(ocelot->dev,
+				"port %d tc %d min gate len %llu, sending all frames\n",
+				port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS
+ * available in the time slot.
+ */
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+			 * Any limit smaller than 64 octets accomplishes this.
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+			/* Take L1 overhead into account, but don't let max_sdu
+			 * underflow or drop to 0. We subtract 20 rather than
+			 * 24 because QSYS_MAXSDU_CFG_* already counts the 4
+			 * FCS octets as part of the packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
+			dev_info(ocelot->dev,
+				 "port %d tc %d min gate length %llu ns not enough for max frame size %d at %d Mbps, dropping frames over %d octets including FCS\n",
+				 port, tc, min_gate_len[tc], maxlen, speed,
+				 max_sdu);
+ }
+
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
@@ -1158,10 +1370,17 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1204,26 +1423,44 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
- if (!taprio->enable) {
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ if (taprio->cmd == TAPRIO_CMD_DESTROY) {
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
+ } else if (taprio->cmd != TAPRIO_CMD_REPLACE) {
+ ret = -EOPNOTSUPP;
+ goto err_unlock;
}
+ ret = ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+ if (ret)
+ goto err_unlock;
+
if (taprio->cycle_time > NSEC_PER_SEC ||
- taprio->cycle_time_extension >= NSEC_PER_SEC)
- return -EINVAL;
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err_reset_tc;
+ }
- if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
- return -ERANGE;
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err_reset_tc;
+ }
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
@@ -1243,9 +1480,14 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
/* Hardware errata - Admin config could not be overwritten if
* config is pending, need reset the TAS module
*/
- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
- return -EBUSY;
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (val & QSYS_TAG_CONFIG_ENABLE) {
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
+ }
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
@@ -1278,10 +1520,77 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+ if (ret)
+ goto err_reset_tc;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return 0;
+
+err_reset_tc:
+ taprio->mqprio.qopt.num_tc = 0;
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+err_unlock:
+ mutex_unlock(&ocelot->fwd_domain_lock);
return ret;
}
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ int i, port;
+ u32 val;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
@@ -1328,6 +1637,40 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
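+
+/* TC_QUERY_CAPS lets the qdisc layer probe these capabilities before
+ * offloading; for instance, taprio refuses per-tc max-sdu settings unless
+ * supports_queue_max_sdu is reported as true, and mqprio asks the core to
+ * validate queue counts and offsets when validate_queue_counts is set.
+ */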
+
+static int vsc9959_qos_port_mqprio(struct ocelot *ocelot, int port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ int ret;
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ret = ocelot_port_mqprio(ocelot, port, mqprio);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return ret;
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1335,8 +1678,12 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return vsc9959_qos_port_mqprio(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
return vsc9959_qos_port_cbs_set(ds, port, type_data);
default:
@@ -1344,127 +1691,1023 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
-static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
- .regfields = vsc9959_regfields,
- .map = vsc9959_regmap,
- .ops = &vsc9959_ops,
- .stats_layout = vsc9959_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
- .vcap = vsc9959_vcap_props,
- .num_mact_rows = 2048,
- .num_ports = 6,
- .num_tx_queues = OCELOT_NUM_TC,
- .switch_pci_bar = 4,
- .imdio_pci_bar = 0,
- .quirk_no_xtr_irq = true,
- .ptp_caps = &vsc9959_ptp_caps,
- .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
- .mdio_bus_free = vsc9959_mdio_bus_free,
- .phylink_validate = vsc9959_phylink_validate,
- .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
- .port_setup_tc = vsc9959_port_setup_tc,
- .port_sched_speed_set = vsc9959_sched_speed_set,
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
+struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+ struct action_gate_entry entries[] __counted_by(num_entries);
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
};
-static irqreturn_t felix_irq_handler(int irq, void *data)
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
{
- struct ocelot *ocelot = (struct ocelot *)data;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
- /* The INTB interrupt is used for both PTP TX timestamp interrupt
- * and preemption status change interrupt on each port.
- *
- * - Get txtstamp if have
- * - TODO: handle preemption. Without handling it, driver may get
- * interrupt storm.
+ if (flow_rule_match_has_control_flags(rule, f->common.extack))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+	/* Stream identification doesn't support adding a stream with a
+	 * nonexistent MAC (one not yet learned into the MAC table).
*/
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
- ocelot_get_txtstamp(ocelot);
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
- return IRQ_HANDLED;
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
}
-static int felix_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
- int err;
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- return -ENODEV;
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
}
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- goto err_pci_enable;
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+		/* Find the first unused index, keeping the list sorted. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
}
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
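+
+/* Worked example: with filters at indices { 0, 1, 3 } and no matching entry,
+ * the new filter is inserted at index 2, keeping the list sorted.
+ */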
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+		/* Find two consecutive unused indices, keeping the list sorted. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
}
+ sfi->index = insert;
- pci_set_drvdata(pdev, felix);
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = OCELOT_NUM_TC;
- felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev,
- felix->info->switch_pci_bar);
- felix->imdio_base = pci_resource_start(pdev,
- felix->info->imdio_pci_bar);
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
- pci_set_master(pdev);
+ sfi2->index = insert + 1;
- err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
- &felix_irq_handler, IRQF_ONESHOT,
- "felix-intb", ocelot);
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- goto err_alloc_irq;
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->hw_index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+		NL_SET_ERR_MSG_MOD(extack, "Can only match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ mutex_lock(&psfp->lock);
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if the stream is already present. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
}
- ocelot->ptp = 1;
+ mutex_unlock(&psfp->lock);
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ mutex_unlock(&psfp->lock);
+
+ return ret;
+}
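
A note on the FLOW_ACTION_POLICE conversion above: rate_bytes_ps is in bytes per second and the hardware rate is programmed in kbit/s, while the burst, which flow_offload expresses as a time in nanoseconds, is turned back into bytes by multiplying it by the rate (PSCHED_NS2TICKS(x) / PSCHED_TICKS_PER_SEC reduces to x / NSEC_PER_SEC). A standalone sketch of the same arithmetic with hypothetical numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical inputs: 100 Mbit/s, 1 ms worth of burst */
		uint64_t rate_bytes_ps = 12500000;
		uint64_t burst_ns = 1000000;

		/* hardware rate is in kbit/s, as in the driver above */
		uint64_t rate_kbps = rate_bytes_ps / 1000 * 8;

		/* burst bytes = rate * burst time */
		uint64_t burst_bytes = rate_bytes_ps * burst_ns / 1000000000ULL;

		printf("rate=%llu kbit/s burst=%llu bytes\n",	/* 100000, 12500 */
		       (unsigned long long)rate_kbps,
		       (unsigned long long)burst_bytes);
		return 0;
	}
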
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
+ return -ENOENT;
+ }
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
+ return -ENOENT;
+ }
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ mutex_unlock(&psfp->lock);
+
+ return 0;
+}
+
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counters for the selected filter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ struct felix_stream_filter *sfi;
+ struct felix_stream *stream;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOENT;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int tc, port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of whether it is in the NPI
+ * port's forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode. Also exclude preemptible
+ * traffic classes, since these would, for some reason, hang
+ * the port if sent as cut-through.
+ */
+ if (ocelot_port->speed == min_speed) {
+ val = GENMASK(7, 0) & ~mm->active_preemptible_tcs;
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling", val);
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
+}
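
The policy described in the comment above boils down to: within a forwarding domain, only the port(s) running at the domain's minimum link speed may use cut-through on egress, since any faster egress port would underrun. A toy model of that rule, with hypothetical speeds (not driver code):

	#include <stdio.h>

	int main(void)
	{
		int speed[] = { 1000, 100, 2500 };	/* one forwarding domain */
		int i, min_speed = speed[0];

		for (i = 1; i < 3; i++)
			if (speed[i] > 0 && speed[i] < min_speed)
				min_speed = speed[i];

		for (i = 0; i < 3; i++)
			printf("port %d: cut-through %s\n", i,
			       speed[i] == min_speed ? "allowed" : "disabled");
		return 0;
	}
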
+
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
+static irqreturn_t vsc9959_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = data;
+
+ ocelot_get_txtstamp(ocelot);
+ ocelot_mm_irq(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int vsc9959_request_irq(struct ocelot *ocelot)
+{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
+
+ return devm_request_threaded_irq(ocelot->dev, pdev->irq, NULL,
+ &vsc9959_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+}
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
+};
+
+static const struct felix_info felix_info_vsc9959 = {
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
+ .regfields = vsc9959_regfields,
+ .map = vsc9959_regmap,
+ .ops = &vsc9959_ops,
+ .vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
+ .num_mact_rows = 2048,
+ .num_ports = VSC9959_NUM_PORTS,
+ .quirks = FELIX_MAC_QUIRKS,
+ .quirk_no_xtr_irq = true,
+ .ptp_caps = &vsc9959_ptp_caps,
+ .mdio_bus_alloc = vsc9959_mdio_bus_alloc,
+ .mdio_bus_free = vsc9959_mdio_bus_free,
+ .port_modes = vsc9959_port_modes,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
+ .request_irq = vsc9959_request_irq,
+};
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+static int felix_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ resource_size_t switch_base;
+ int err;
- err = dsa_register_switch(ds);
+ err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
- goto err_register_ds;
+ dev_err(dev, "device enable failed: %pe\n", ERR_PTR(err));
+ return err;
}
+ pci_set_master(pdev);
+
+ switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
+
+ err = felix_register_switch(dev, switch_base, OCELOT_NUM_TC,
+ true, true, DSA_TAG_PROTO_OCELOT,
+ &felix_info_vsc9959);
+ if (err)
+ goto out_disable;
+
return 0;
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_irq:
- kfree(felix);
-err_alloc_felix:
+out_disable:
pci_disable_device(pdev);
-err_pci_enable:
return err;
}
@@ -1477,12 +2720,7 @@ static void felix_pci_remove(struct pci_dev *pdev)
dsa_unregister_switch(felix->ds);
- kfree(felix->ds);
- kfree(felix);
-
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
new file mode 100644
index 000000000000..d5c557a20292
--- /dev/null
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ */
+
+#include <linux/mfd/ocelot.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "felix.h"
+
+#define VSC7514_NUM_PORTS 11
+
+#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SGMII,
+ OCELOT_PORT_MODE_SERDES,
+};
+
+static const struct ocelot_ops ocelot_ext_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static const char * const vsc7512_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [QS] = "qs",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct felix_info vsc7512_info = {
+ .resource_names = vsc7512_resource_names,
+ .regfields = vsc7514_regfields,
+ .map = vsc7514_regmap,
+ .ops = &ocelot_ext_ops,
+ .vcap = vsc7514_vcap_props,
+ .num_mact_rows = 1024,
+ .num_ports = VSC7514_NUM_PORTS,
+ .port_modes = vsc7512_port_modes,
+ .phylink_mac_config = ocelot_phylink_mac_config,
+ .configure_serdes = ocelot_port_configure_serdes,
+};
+
+static int ocelot_ext_probe(struct platform_device *pdev)
+{
+ return felix_register_switch(&pdev->dev, 0, 1, false, false,
+ DSA_TAG_PROTO_OCELOT, &vsc7512_info);
+}
+
+static void ocelot_ext_remove(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_unregister_switch(felix->ds);
+}
+
+static void ocelot_ext_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id ocelot_ext_switch_of_match[] = {
+ { .compatible = "mscc,vsc7512-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
+
+static struct platform_driver ocelot_ext_switch_driver = {
+ .driver = {
+ .name = "ocelot-ext-switch",
+ .of_match_table = ocelot_ext_switch_of_match,
+ },
+ .probe = ocelot_ext_probe,
+ .remove = ocelot_ext_remove,
+ .shutdown = ocelot_ext_shutdown,
+};
+module_platform_driver(ocelot_ext_switch_driver);
+
+MODULE_DESCRIPTION("External Ocelot Switch driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("MFD_OCELOT");
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 92eae63150ea..eb3944ba2a72 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -2,22 +2,42 @@
/* Distributed Switch Architecture VSC9953 driver
* Copyright (C) 2020, Maxim Kochetkov <fido_max@inbox.ru>
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
-#include <linux/of_platform.h>
+#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_mdio.h>
#include <linux/pcs-lynx.h>
#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
#include "felix.h"
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
+#define VSC9953_NUM_PORTS 10
+
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
+
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -251,27 +271,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -293,7 +384,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -369,110 +459,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -524,102 +544,6 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
-};
-
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4},
[VCAP_ES0_IGR_PORT] = { 4, 4},
@@ -857,7 +781,6 @@ static struct vcap_props vsc9953_vcap_props[] = {
#define VSC9953_INIT_TIMEOUT 50000
#define VSC9953_GCB_RST_SLEEP 100
#define VSC9953_SYS_RAMINIT_SLEEP 80
-#define VCS9953_MII_TIMEOUT 10000
static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
{
@@ -877,82 +800,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
return val;
}
-static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val);
-
- return val;
-}
-
-static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val);
-
- return val;
-}
-
-static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait while MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO write: pending timeout\n");
- goto out;
- }
-
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
- (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
- MSCC_MIIM_CMD_OPR_WRITE;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
-out:
- return err;
-}
-
-static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait until MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: pending timeout\n");
- goto out;
- }
-
- /* Write the MIIM COMMAND register */
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
- /* Wait while read operation via the MIIM controller is in progress */
- err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: busy timeout\n");
- goto out;
- }
-
- val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
-
- err = val & 0xFFFF;
-out:
- return err;
-}
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
* MEM_INIT is in SYS:SYSTEM:RESET_CFG
@@ -990,57 +837,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 8 && port != 9)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- /* Not supported on internal to-CPU ports */
- if (port == 8 || port == 9)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -1089,26 +885,24 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
- bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
-
- bus->name = "VSC9953 internal MDIO bus";
- bus->read = vsc9953_mdio_read;
- bus->write = vsc9953_mdio_write;
- bus->parent = dev;
- bus->priv = ocelot;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+ rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ ocelot->targets[GCB],
+ ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
+ true);
+ if (rc) {
+ dev_err(dev, "failed to setup MDIO bus\n");
+ return rc;
+ }
/* Needed in order to initialize the bus mutex lock */
- rc = mdiobus_register(bus);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1118,9 +912,8 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
int addr = port + 4;
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1128,17 +921,11 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, addr);
- if (IS_ERR(pcs))
- continue;
-
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create_mdiodev(felix->imdio, addr);
+ if (IS_ERR(phylink_pcs))
continue;
- }
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -1152,110 +939,59 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
-
- if (!pcs)
- continue;
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ if (phylink_pcs)
+ lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
- .stats_layout = vsc9953_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
- .num_ports = 10,
- .num_tx_queues = OCELOT_NUM_TC,
+ .num_ports = VSC9953_NUM_PORTS,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
- .phylink_validate = vsc9953_phylink_validate,
- .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .port_modes = vsc9953_port_modes,
};
static int seville_probe(struct platform_device *pdev)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
+ struct device *dev = &pdev->dev;
struct resource *res;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
- }
-
- platform_set_drvdata(pdev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = 1;
- felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid resource\n");
- goto err_alloc_felix;
- }
- felix->switch_base = res->start;
-
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
+ dev_err(dev, "Invalid resource\n");
+ return -EINVAL;
}
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
- goto err_register_ds;
- }
-
- return 0;
-
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(dev, res->start, 1, false, false,
+ DSA_TAG_PROTO_SEVILLE,
+ &seville_info_vsc9953);
}
-static int seville_remove(struct platform_device *pdev)
+static void seville_remove(struct platform_device *pdev)
{
struct felix *felix = platform_get_drvdata(pdev);
if (!felix)
- return 0;
+ return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void seville_shutdown(struct platform_device *pdev)
@@ -1282,7 +1018,7 @@ static struct platform_driver seville_vsc9953_driver = {
.shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
- .of_match_table = of_match_ptr(seville_of_match),
+ .of_match_table = seville_of_match,
},
};
module_platform_driver(seville_vsc9953_driver);
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 13b7e679b8b5..de9da469908b 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -7,3 +7,20 @@ config NET_DSA_AR9331
help
This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
switch.
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
+config NET_DSA_QCA8K_LEDS_SUPPORT
+ bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
+ depends on NET_DSA_QCA8K
+ depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+ depends on LEDS_TRIGGERS
+ help
+ This enables support for the LEDs present on the Qualcomm Atheros
+ QCA8K Ethernet switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 274022319066..ce66b1984e5f 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -1,2 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
+ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+qca8k-y += qca8k-leds.o
+endif
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -231,6 +231,7 @@ struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
@@ -378,7 +379,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -390,7 +391,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
u32 port_mask, port_ctrl, val;
int ret;
@@ -438,7 +439,7 @@ error:
static int ar9331_sw_setup(struct dsa_switch *ds)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
int ret, i;
@@ -483,7 +484,7 @@ error:
static void ar9331_sw_port_disable(struct dsa_switch *ds, int port)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_priv *priv = ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
@@ -499,76 +500,53 @@ static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_AR9331;
}
-static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
switch (port) {
case 0:
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
- state->interface, port);
}
-static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(dp->index),
AR9331_SW_PORT_STATUS_LINK_EN |
AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
-static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
@@ -576,23 +554,24 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
- cancel_delayed_work_sync(&p->mib_read);
+ cancel_delayed_work_sync(&priv->port[port].mib_read);
}
-static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
u32 val;
int ret;
- schedule_delayed_work(&p->mib_read, 0);
+ schedule_delayed_work(&priv->port[port].mib_read, 0);
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
@@ -629,6 +608,7 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
@@ -669,6 +649,9 @@ static void ar9331_read_stats(struct ar9331_sw_port *port)
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
spin_unlock(&port->stats_lock);
}
@@ -685,7 +668,7 @@ static void ar9331_do_stats_poll(struct work_struct *work)
static void ar9331_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_priv *priv = ds->priv;
struct ar9331_sw_port *p = &priv->port[port];
spin_lock(&p->stats_lock);
@@ -693,15 +676,30 @@ static void ar9331_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
+static const struct phylink_mac_ops ar9331_phylink_mac_ops = {
+ .mac_config = ar9331_sw_phylink_mac_config,
+ .mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
- .phylink_validate = ar9331_sw_phylink_validate,
- .phylink_mac_config = ar9331_sw_phylink_mac_config,
- .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
- .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
.get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -823,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
+ priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
@@ -843,7 +841,7 @@ static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_write(sbus, p, r, val);
+ return __mdiobus_write(sbus, p, r, val);
}
static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
@@ -854,7 +852,7 @@ static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_read(sbus, p, r);
+ return __mdiobus_read(sbus, p, r);
}
static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
@@ -874,6 +872,8 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
return 0;
}
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+
ret = __ar9331_mdio_read(sbus, reg);
if (ret < 0)
goto error;
@@ -885,9 +885,13 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
*(u32 *)val_buf |= ret << 16;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+
return ret;
}
@@ -897,12 +901,15 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
struct mii_bus *sbus = priv->sbus;
int ret;
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
if (reg == AR9331_SW_REG_PAGE) {
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
0, val);
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
}
@@ -922,10 +929,14 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+
return ret;
}
@@ -997,6 +1008,8 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = AR9331_SW_REG_PAGE,
+ .use_single_read = true,
+ .use_single_write = true,
.ranges = ar9331_regmap_range,
.num_ranges = ARRAY_SIZE(ar9331_regmap_range),
@@ -1005,16 +1018,14 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.wr_table = &ar9331_register_set,
.rd_table = &ar9331_register_set,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
-static struct regmap_bus ar9331_sw_bus = {
+static const struct regmap_bus ar9331_sw_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.read = ar9331_mdio_read,
.write = ar9331_sw_bus_write,
- .max_raw_read = 4,
- .max_raw_write = 4,
};
static int ar9331_sw_probe(struct mdio_device *mdiodev)
@@ -1054,6 +1065,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->priv = priv;
priv->ops = ar9331_sw_ops;
ds->ops = &priv->ops;
+ ds->phylink_mac_ops = &ar9331_phylink_mac_ops;
dev_set_drvdata(&mdiodev->dev, priv);
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
@@ -1091,12 +1103,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
new file mode 100644
index 000000000000..a36b8b07030e
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -0,0 +1,2227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/gpio/consumer.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
+
+#include "qca8k.h"
+#include "qca8k_leds.h"
+
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x3ff;
+}
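
qca8k_split_addr() maps a 32-bit switch register address onto the MDIO access scheme: the low register offset r1 is kept even (each 32-bit register occupies two 16-bit MDIO registers), the next bits select the device address r2, and the remainder the page. A worked example with an arbitrary address of 0x624:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t regaddr = 0x624;	/* arbitrary example address */
		uint16_t r1, r2, page;

		regaddr >>= 1;
		r1 = regaddr & 0x1e;	/* 0x12: even MDIO register offset */

		regaddr >>= 5;
		r2 = regaddr & 0x7;	/* 0x0: device address bits */

		regaddr >>= 3;
		page = regaddr & 0x3ff;	/* 0x3: selected via qca8k_set_page() */

		printf("r1=0x%x r2=0x%x page=0x%x\n", r1, r2, page);
		return 0;
	}
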
+
+static int
+qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ int ret;
+ u16 lo;
+
+ lo = val & 0xffff;
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit lo register\n");
+
+ return ret;
+}
+
+static int
+qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ int ret;
+ u16 hi;
+
+ hi = (u16)(val >> 16);
+ ret = bus->write(bus, phy_id, regnum, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit hi register\n");
+
+ return ret;
+}
+
+static int
+qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret < 0)
+ goto err;
+
+ *val = ret & 0xffff;
+ return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit lo register\n");
+ *val = 0;
+
+ return ret;
+}
+
+static int
+qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret < 0)
+ goto err;
+
+ *val = ret << 16;
+ return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit hi register\n");
+ *val = 0;
+
+ return ret;
+}
+
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ u32 hi, lo;
+ int ret;
+
+ *val = 0;
+
+ ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
+ if (ret < 0)
+ goto err;
+
+ ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
+ if (ret < 0)
+ goto err;
+
+ *val = lo | hi;
+
+err:
+ return ret;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
+ return;
+
+ qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
+}
+
+static int
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
+{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ if (page == *cached_page)
+ return 0;
+
+ ret = bus->write(bus, 0x18, 0, page);
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to set qca8k page\n");
+ return ret;
+ }
+
+ *cached_page = page;
+ usleep_range(1000, 2000);
+ return 0;
+}
+
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
+ u8 len, cmd;
+ int i;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+ /* Special case for a len of 15: this is the max value for len, and it
+ * needs to be incremented before converting it from words to dwords.
+ */
+ if (len == 15)
+ len++;
+
+ /* We can ignore odd values; we always round them up in the alloc function. */
+ len *= sizeof(u16);
+
+ /* Make sure the seq match the requested packet */
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
+
+ /* Get the rest of the 12 bytes of data.
+ * The read/write function will extract the requested data.
+ */
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ __le32 *data2;
+ u32 command;
+ u16 hdr;
+ int i;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+	/* The hdr mgmt length value is in steps of the word size.
+	 * As an example, to process 4 bytes of data the correct length to set
+	 * is 2; to process 8 bytes it is 4, for 12 bytes 6, for 16 bytes 8...
+	 *
+	 * Odd values will always return the next size on the ack packet.
+	 * (A length of 3 (6 bytes) will always return 8 bytes of data.)
+	 *
+	 * This means that a value of 15 (0xf) actually means reading/writing
+	 * 32 bytes of data.
+	 *
+	 * To correctly calculate the length we divide the requested len by the
+	 * word size and round up.
+	 * In the ack function we can skip the odd check as we already handle
+	 * the case here.
+ */
+ real_len = DIV_ROUND_UP(len, sizeof(u16));
+
+	/* If the resulting len is odd, round up once more to the next size.
+	 * (A length of 3 will be increased to 4, as the switch will always
+	 * return 8 bytes.)
+ */
+ if (real_len % sizeof(u16) != 0)
+ real_len++;
+
+	/* The max length value is 0xf (15), but the switch will always return the next size (32 bytes) */
+ if (real_len == 16)
+ real_len--;
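+
+	/* A few resulting mappings (illustrative): len 4 -> real_len 2,
+	 * len 6 -> 4 (rounded up to even), len 32 -> 15 (capped).
+	 */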
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
+ if (cmd == MDIO_WRITE)
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
+
+ return skb;
+}
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
+}
+
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if the mgmt_conduit is operational */
+ if (!priv->mgmt_conduit) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_conduit;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+	/* Check if the mgmt_conduit is operational */
+ if (!priv->mgmt_conduit) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_conduit;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ int ret;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+ return ret;
+}
+
+static int
+qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
+ uint32_t mask, uint32_t write_val)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ if (ret < 0)
+ goto exit;
+
+ val &= ~mask;
+ val |= write_val;
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+exit:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ struct qca8k_priv *priv = ctx;
+ u32 reg = *(u16 *)reg_buf;
+
+ if (priv->mgmt_conduit &&
+ !qca8k_read_eth(priv, reg, val_buf, val_len))
+ return 0;
+
+	/* Loop count times, incrementing reg by 4 each iteration */
+ for (i = 0; i < count; i++, reg += sizeof(u32)) {
+ ret = qca8k_read_mii(priv, reg, val_buf + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ struct qca8k_priv *priv = ctx;
+ u32 reg = *(u16 *)reg_buf;
+ u32 *val = (u32 *)val_buf;
+
+ if (priv->mgmt_conduit &&
+ !qca8k_write_eth(priv, reg, val, val_len))
+ return 0;
+
+	/* Loop count times, incrementing reg by 4 and advancing the val
+	 * ptr to the next value
+ */
+ for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
+ ret = qca8k_write_mii(priv, reg, *val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
+{
+ return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
+ bytes - sizeof(u16));
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = ctx;
+
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
+ return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
+}
+
+static const struct regmap_config qca8k_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .read = qca8k_bulk_read,
+ .write = qca8k_bulk_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
+ .rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+	.max_raw_read = 32, /* mgmt eth can read up to 8 registers at a time */
+	/* ATU regs suffer from a bug where some data is not correctly
+	 * written. Disable bulk writes so ATU entries are written correctly.
+ */
+ .use_single_write = true,
+};
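+
+/* A sketch of the resulting behaviour, assuming regmap's usual bulk
+ * handling: a bulk read of up to 8 consecutive 32-bit registers
+ * (max_raw_read = 32) is served by qca8k_bulk_read() as a single
+ * Ethernet mgmt frame when the conduit is operational, and falls back
+ * to per-register paged MDIO accesses otherwise; use_single_write
+ * makes bulk writes go one register at a time for the ATU workaround.
+ */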
+
+static int
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
+{
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
+
+ if (!skb)
+ return -ENOMEM;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ *val = mgmt_eth_data->data[0];
+
+ return 0;
+}
+
+static int
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
+{
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_conduit;
+ int ret, ret1;
+ bool ack;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
+ }
+
+ /* Prealloc all the needed skb before the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
+
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
+
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
+
+ /* It seems that accessing the switch's internal PHYs via management
+ * packets still uses the MDIO bus within the switch internally, and
+ * these accesses can conflict with external MDIO accesses to other
+ * devices on the MDIO bus.
+ * We therefore need to lock the MDIO bus onto which the switch is
+ * connected.
+ */
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ /* Actually start the request:
+ * 1. Send mdio master packet
+ * 2. Busy Wait for mdio master command
+ * 3. Get the data if we are reading
+ * 4. Reset the mdio master (even with error)
+ */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_conduit is operational */
+ mgmt_conduit = priv->mgmt_conduit;
+ if (!mgmt_conduit) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
+ ret = -EINVAL;
+ goto err_mgmt_conduit;
+ }
+
+ read_skb->dev = mgmt_conduit;
+ clear_skb->dev = mgmt_conduit;
+ write_skb->dev = mgmt_conduit;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(write_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
+
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
+
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
+
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(read_skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ ack = mgmt_eth_data->ack;
+
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+ } else {
+ kfree_skb(read_skb);
+ }
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(clear_skb);
+
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return ret;
+
+ /* Error handling before lock */
+err_mgmt_conduit:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
+
+ return ret;
+}
+
+static int
+qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
+{
+ u16 r1, r2, page;
+ u32 val;
+ int ret, ret1;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ bus, 0x10 | r2, r1 + 1, &val);
+
+	/* Check if qca8k_read has failed for a different reason
+	 * before returning -ETIMEDOUT
+ */
+ if (ret < 0 && ret1 < 0)
+ return ret1;
+
+ return ret;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+ QCA8K_MDIO_MASTER_DATA(data);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+
+exit:
+	/* Even if the busy_wait times out, try to clear the MASTER_EN */
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ u16 r1, r2, page;
+ u32 val;
+ int ret;
+
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
+
+ val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = qca8k_set_page(priv, page);
+ if (ret)
+ goto exit;
+
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
+
+ ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
+
+exit:
+	/* Even if the busy_wait times out, try to clear the MASTER_EN */
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (ret >= 0)
+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
+
+ return ret;
+}
+
+static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+ struct qca8k_priv *priv = slave_bus->priv;
+ int ret;
+
+	/* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
+
+ ret = qca8k_mdio_read(priv, phy, regnum);
+
+ if (ret < 0)
+ return 0xffff;
+
+ return ret;
+}
+
+static int
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
+{
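+	/* Legacy mapping (assuming the qca8k_port_to_phy() helper maps
+	 * user port N to internal PHY N - 1): e.g. port 3 is accessed
+	 * at PHY address 2.
+	 */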
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
+}
+
+static int
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
+}
+
+static int
+qca8k_mdio_register(struct qca8k_priv *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ struct device *dev = ds->dev;
+ struct device_node *mdio;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (mdio && !of_device_is_available(mdio))
+ goto out_put_node;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ priv->internal_mdio_bus = bus;
+ bus->priv = (void *)priv;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
+ bus->parent = dev;
+
+ if (mdio) {
+ /* Check if the device tree declares the port:phy mapping */
+ bus->name = "qca8k user mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ } else {
+		/* If a mapping can't be found, the legacy mapping is
+		 * used via qca8k_port_to_phy()
+ */
+ ds->user_mii_bus = bus;
+ bus->phy_mask = ~ds->phys_mii_mask;
+ bus->name = "qca8k-legacy user mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ }
+
+ ret = devm_of_mdiobus_register(dev, bus, mdio);
+
+out_put_node:
+ of_node_put(mdio);
+ return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+ u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+ struct device_node *ports, *port;
+ phy_interface_t mode;
+ int ret;
+
+ ports = of_get_child_by_name(priv->dev->of_node, "ports");
+ if (!ports)
+ ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
+
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ ret = of_property_read_u32(port, "reg", &reg);
+ if (ret) {
+ of_node_put(port);
+ of_node_put(ports);
+ return ret;
+ }
+
+ if (!dsa_is_user_port(priv->ds, reg))
+ continue;
+
+ of_get_phy_mode(port, &mode);
+
+ if (of_property_present(port, "phy-handle") &&
+ mode != PHY_INTERFACE_MODE_INTERNAL)
+ external_mdio_mask |= BIT(reg);
+ else
+ internal_mdio_mask |= BIT(reg);
+ }
+
+ of_node_put(ports);
+ if (!external_mdio_mask && !internal_mdio_mask) {
+ dev_err(priv->dev, "no PHYs are defined.\n");
+ return -EINVAL;
+ }
+
+ /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
+ * the MDIO_MASTER register also _disconnects_ the external MDC
+ * passthrough to the internal PHYs. It's not possible to use both
+ * configurations at the same time!
+ *
+ * Because this came up during the review process:
+	 * If the external mdio-bus driver were capable of magically disabling
+ * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
+ * accessors for the time being, it would be possible to pull this
+ * off.
+ */
+ if (!!external_mdio_mask && !!internal_mdio_mask) {
+ dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+ return -EINVAL;
+ }
+
+ if (external_mdio_mask) {
+		/* Make sure to disable the internal mdio bus in case
+		 * a dt-overlay or driver reload changed the configuration
+ */
+
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
+ }
+
+ return qca8k_mdio_register(priv);
+}
+
+static int
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
+{
+ u32 mask = 0;
+ int ret = 0;
+
+ /* SoC specific settings for ipq8064.
+	 * If more devices require this, consider adding
+	 * a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
+
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+	/* Find the connected cpu port. Valid ports are 0 and 6 */
+ if (dsa_is_cpu_port(ds, 0))
+ return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ const struct qca8k_match_data *data = priv->info;
+ struct device_node *node = priv->dev->of_node;
+ u32 val = 0;
+ int ret;
+
+	/* The QCA8327 requires the correct package mode to be set.
+	 * Its bigger brother, the QCA8328, has the 172-pin layout.
+	 * This should be applied by default, but we set it just to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
+
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+			dev_err(priv->dev, "qca,led-open-drain requires qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
+ }
+
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
+
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
+
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
+
+	/* We have 2 CPU ports. Check both of them */
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+		/* Skip all ports other than the two CPU ports */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+				/* Switch regs accept values in ns; convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+				/* Switch regs accept values in ns; convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+	/* The delay can be declared in 3 different ways:
+	 * phy-mode set to rgmii with the standard internal-delay binding,
+	 * or the rgmii-id or rgmii-txid/rgmii-rxid phy-mode.
+	 * The parse logic sets a delay different from 0 only when one of
+	 * the 3 ways is used; in all other cases the delay is not enabled.
+	 * With ID or TX/RXID the delay is enabled and set to the default
+	 * and recommended value.
+ */
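+	/* For example (illustrative): a port with phy-mode = "rgmii-id"
+	 * and no *-internal-delay-ps properties ends up with tx/rx delays
+	 * of 1 ns and 2 ns, while tx-internal-delay-ps = <3000> in the DT
+	 * requests the maximum 3 ns tx delay.
+	 */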
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+ struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
+static void
+qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct qca8k_priv *priv;
+ int port = dp->index;
+ int cpu_port_index;
+ u32 reg;
+
+ priv = ds->priv;
+
+ switch (port) {
+ case 0: /* 1st CPU port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII)
+ return;
+
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY, nothing to do */
+ return;
+ case 6: /* 2nd CPU port / external PHY */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
+ state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_1000BASEX)
+ return;
+
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+ default:
+ dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+ return;
+ }
+
+ if (port != 6 && phylink_autoneg_inband(mode)) {
+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+ __func__);
+ return;
+ }
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+		/* The QCA8337 requires the rgmii rx delay to be set for all ports.
+ * This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than individual port registers.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337)
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ /* Enable SGMII on the port */
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+ default:
+ dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+}
+
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ case 0: /* 1st CPU port */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ break;
+
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ /* Internal PHY */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
+
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ break;
+ }
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+}
+
+static void
+qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+
+ qca8k_port_set_status(priv, dp->index, 0);
+}
+
+static void
+qca8k_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+ int port = dp->index;
+ u32 reg;
+
+ if (phylink_autoneg_inband(mode)) {
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ reg = QCA8K_PORT_STATUS_SPEED_10;
+ break;
+ case SPEED_100:
+ reg = QCA8K_PORT_STATUS_SPEED_100;
+ break;
+ case SPEED_1000:
+ reg = QCA8K_PORT_STATUS_SPEED_1000;
+ break;
+ default:
+ reg = QCA8K_PORT_STATUS_LINK_AUTO;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+ if (rx_pause || dsa_port_is_cpu(dp))
+ reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+ if (tx_pause || dsa_port_is_cpu(dp))
+ reg |= QCA8K_PORT_STATUS_TXFLOW;
+ }
+
+ reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
+}
+
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct qca8k_pcs, pcs);
+}
+
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
+ int ret;
+
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
+
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
+
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
+
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ val = neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED ?
+ 0 : QCA8K_PWS_SERDES_AEN_DIS;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8K_PWS_SERDES_AEN_DIS, val);
+ if (ret)
+ return ret;
+
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+	/* The original code reports port instability, as SGMII also
+	 * requires delays to be set. Apply the advised values here or
+	 * take them from the DT.
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+	/* For qca8327/qca8328/qca8334/qca8338 the sgmii port is unique, and
+	 * the falling edge is set by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ return 0;
+}
+
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
+{
+}
+
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
+
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
+}
+
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ __le32 *data2;
+ u8 port;
+ int i;
+
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
+
+	/* The switch autocasts MIB data for every port. Ignore packets for
+	 * other ports and parse only the requested one.
+ */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
+
+ data2 = (__le32 *)skb->data;
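+
+	/* The layout follows the ar8327_mib[] table: e.g. a 64-bit counter
+	 * such as RxGoodByte (size 2 in the table) consumes two 32-bit
+	 * words from data2, while 32-bit counters consume one.
+	 */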
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+
+		/* The first 3 mib counters are present in the skb head */
+ if (i < 3) {
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
+ continue;
+ }
+
+		/* Some mib counters are 64 bits wide */
+ if (mib->size == 2)
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
+
+ data2 += mib->size;
+ }
+
+exit:
+	/* Complete once all the mib packets have been received */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
+}
+
+static int
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ mib_eth_data = &priv->mib_eth_data;
+
+ mutex_lock(&mib_eth_data->mutex);
+
+ reinit_completion(&mib_eth_data->rw_done);
+
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
+
+ mutex_lock(&priv->reg_mutex);
+
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
+
+ mutex_unlock(&priv->reg_mutex);
+
+ if (ret)
+ goto exit;
+
+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
+
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
+}
+
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+	/* Communicate the switch revision to the internal phy driver.
+	 * Based on the switch revision, different values need to be set
+	 * in the dbg and mmd regs of the phy.
+	 * The first 2 bits are used to communicate the switch revision
+	 * to the phy driver.
+ */
+ if (port > 0 && port < 6)
+ return priv->switch_revision;
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_QCA;
+}
+
+static void
+qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit,
+ bool operational)
+{
+ struct dsa_port *dp = conduit->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
+
+ priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL;
+
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
+}
+
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct qca_tagger_data *tagger_data;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void qca8k_setup_hol_fixup(struct qca8k_priv *priv, int port)
+{
+ u32 mask;
+
+ switch (port) {
+	/* The 2 CPU ports and port 5 require different
+	 * priorities than the other ports.
+ */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ regmap_write(priv->regmap, QCA8K_REG_PORT_HOL_CTRL0(port), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ regmap_update_bits(priv->regmap, QCA8K_REG_PORT_HOL_CTRL1(port),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp;
+ int cpu_port, ret;
+ u32 mask;
+
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+		dev_err(priv->dev, "No cpu port configured on either port 0 or port 6");
+ return cpu_port;
+ }
+
+	/* Parse the CPU port config to be used later in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_led_ctrl(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
+ return ret;
+ }
+
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
+
+ /* Initial setup of all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(dp->index),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable MAC by default on all user ports */
+ dsa_switch_for_each_user_port(dp, ds)
+ qca8k_port_set_status(priv, dp->index, 0);
+
+ /* Enable QCA header mode on all cpu ports */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(dp->index),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode on port %d", dp->index);
+ return ret;
+ }
+ }
+
+	/* Forward all unknown frames to the CPU port for Linux processing.
+	 * Notice that in a multi-cpu config only one port should be set
+	 * for igmp, unknown, multicast and broadcast packets
+ */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
+
+ /* CPU port gets connected to all user ports of the switch */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(cpu_port),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+
+ /* Setup connection between CPU port & user ports
+ * Individual user ports get connected to CPU port only
+ */
+ dsa_switch_for_each_user_port(dp, ds) {
+ u8 port = dp->index;
+
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+	/* Port 5 of the qca8337 has some problems in flood conditions. The
+	 * original legacy driver had specific buffer and priority settings
+	 * for the different ports suggested by the QCA switch team. Add these
+	 * missing settings to improve switch stability under load conditions.
+	 * This problem is limited to the qca8337; other qca8k switches are
+	 * not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337)
+ dsa_switch_for_each_available_port(dp, ds)
+ qca8k_setup_hol_fixup(priv, dp->index);
+
+	/* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+ /* Setup our port MTUs to match power on defaults */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ dev_warn(priv->dev, "failed setting MTU settings");
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+	/* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
+
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
+
+ return 0;
+}
+
+static const struct phylink_mac_ops qca8k_phylink_mac_ops = {
+ .mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .mac_config = qca8k_phylink_mac_config,
+ .mac_link_down = qca8k_phylink_mac_link_down,
+ .mac_link_up = qca8k_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
+ .support_eee = dsa_supports_eee,
+ .set_mac_eee = qca8k_set_mac_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_change_mtu = qca8k_port_change_mtu,
+ .port_max_mtu = qca8k_port_max_mtu,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_pre_bridge_flags = qca8k_port_pre_bridge_flags,
+ .port_bridge_flags = qca8k_port_bridge_flags,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
+ .port_vlan_filtering = qca8k_port_vlan_filtering,
+ .port_vlan_add = qca8k_port_vlan_add,
+ .port_vlan_del = qca8k_port_vlan_del,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
+ .conduit_state_change = qca8k_conduit_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv;
+ int ret;
+
+	/* Allocate the private data struct so that we can probe the switch's
+	 * ID register
+ */
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
+
+ priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ if (priv->reset_gpio) {
+ /* The active low duration must be greater than 10 ms
+ * and checkpatch.pl wants 20 ms.
+ */
+ msleep(20);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ }
+
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->mdio_cache.page = 0xffff;
+
+ /* Check the detected switch id */
+ ret = qca8k_read_switch_id(priv);
+ if (ret)
+ return ret;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->num_ports = QCA8K_NUM_PORTS;
+ priv->ds->priv = priv;
+ priv->ds->ops = &qca8k_switch_ops;
+ priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+ int port;
+
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
+ continue;
+
+ qca8k_port_set_status(priv, port, enable);
+ }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 0);
+
+ return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
+
+ qca8k_set_pm(priv, 1);
+
+ return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ qca8k_suspend, qca8k_resume);
+
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+};
+
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca8328 = {
+ .id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct qca8k_match_data qca833x = {
+ .id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
+};
+
+static const struct of_device_id qca8k_of_match[] = {
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
+ { .compatible = "qca,qca8334", .data = &qca833x },
+ { .compatible = "qca,qca8337", .data = &qca833x },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+ .probe = qca8k_sw_probe,
+ .remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
+ .mdiodrv.driver = {
+ .name = "qca8k",
+ .of_match_table = qca8k_of_match,
+ .pm = &qca8k_pm_ops,
+ },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000000..13005f10edb7
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
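+
+/* For example, MIB_DESC(2, 0x3c, "RxGoodByte") below describes a
+ * 64-bit (two-word) counter at offset 0x3c within each port's MIB
+ * block.
+ */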
+
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[QCA8K_ATU_TABLE_SIZE];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
+}
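+
+/* Worked example (illustrative values): the entry
+ * mac = 00:11:22:33:44:55, vid = 1, port_mask = 0x2, aging = 0xf
+ * packs into the three ATU data words as
+ *	reg[2] = 0x0000010f (vid in bits 19:8, aging in bits 3:0)
+ *	reg[1] = 0x00020011 (port mask in bits 22:16, mac[0..1] in bits 15:0)
+ *	reg[0] = 0x22334455 (mac[2..5] in bits 31:0)
+ */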
+
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
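+
+/* A typical sequence, as used by the helpers below, is to stage an entry
+ * with qca8k_fdb_write() and then commit it with
+ * qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1), passing -1 to leave the
+ * per-port filter disabled.
+ */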
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid, u8 aging)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists; delete it first */
+ if (fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ } else {
+ fdb.aging = aging;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist; nothing to delete */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* This port is the only member of the rule; don't re-insert it */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Ports 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ ethtool_puts(&data, ar8327_mib[i].name);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
+ if (priv->mgmt_conduit && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_keee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
+ bool learning)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (learning)
+ return regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_LEARN);
+ else
+ return regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_LEARN);
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_priv *priv = ds->priv;
+ bool learning = false;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ learning = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ learning = dp->learning;
+ fallthrough;
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+
+ qca8k_port_configure_learning(ds, port, learning);
+}
+
+static int qca8k_update_port_member(struct qca8k_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join)
+{
+ bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated;
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ u32 port_mask = BIT(dp->cpu_dp->index);
+ int i, ret;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (dsa_is_cpu_port(priv->ds, i))
+ continue;
+
+ other_dp = dsa_to_port(priv->ds, i);
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ other_isolated = !!(priv->port_isolated_map & BIT(i));
+
+ /* Add/remove this port to/from the portvlan mask of the other
+ * ports in the bridge
+ */
+ if (join && !(isolated && other_isolated)) {
+ port_mask |= BIT(i);
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ } else {
+ ret = regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Add/remove all other ports to/from this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
+ return -EINVAL;
+
+ return 0;
+}
+
+int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = qca8k_port_configure_learning(ds, port,
+ flags.val & BR_LEARNING);
+ if (ret)
+ return ret;
+ }
+
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ if (flags.val & BR_ISOLATED)
+ priv->port_isolated_map |= BIT(port);
+ else
+ priv->port_isolated_map &= ~BIT(port);
+
+ ret = qca8k_update_port_member(priv, port, bridge_dev, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ return qca8k_update_port_member(priv, port, bridge.dev, true);
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int err;
+
+ err = qca8k_update_port_member(priv, port, bridge.dev, false);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to update switch config for bridge leave: %d\n",
+ err);
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* The AGE_TIME reg is set in 7 s steps */
+ val = secs / 7;
+
+ /* Round a result of 0 up to 1, as writing 0 would
+ * disable learning
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
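+
+/* Worked example (illustrative): the bridge default ageing time of 300000 ms
+ * gives secs = 300 and val = 300 / 7 = 42, so entries are aged out after
+ * roughly 42 * 7 = 294 s.
+ */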
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ /* We only have a global MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the user
+ * ports.
+ * Setting the MTU just for the CPU port is therefore sufficient to
+ * correctly set a value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* To change MAX_FRAME_SIZE the CPU ports must be off, or
+ * the switch panics.
+ * Turn off both CPU ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include the L2 header / FCS length; e.g. a 1500 byte MTU programs 1518 */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return an error otherwise.
+ * When no mirror port is set, the value reads as 0xF.
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port set to send packet to mirror port. Disable mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
+ return false;
+ }
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return false;
+ }
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
+ return false;
+ }
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the only configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is used by all of the 4 possible LAGs.
+ * If we are the only configured LAG we can set whatever
+ * hash mode we want.
+ * To change the hash mode, all LAGs must be removed and
+ * re-created with the new mode.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag_dev, "Error: Mismatched hash mode across different LAGs is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update port member. With empty portmap disable trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+ /* Search for an empty member slot when adding, or for this port's slot when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the member is flagged as disabled, assume this slot
+ * is empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the member is flagged as enabled, assume this slot
+ * is already taken
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
new file mode 100644
index 000000000000..43ac68052baf
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "qca8k.h"
+#include "qca8k_leds.h"
+
+static u32 qca8k_phy_to_port(int phy)
+{
+ /* Internal PHY 0 has port at index 1.
+ * Internal PHY 1 has port at index 2.
+ * Internal PHY 2 has port at index 3.
+ * Internal PHY 3 has port at index 4.
+ * Internal PHY 4 has port at index 5.
+ */
+
+ return phy + 1;
+}
+
+static int
+qca8k_get_enable_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
+{
+ switch (port_num) {
+ case 0:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ /* Ports 1, 2 and 3 are controlled by a different reg */
+ reg_info->reg = QCA8K_LED_CTRL3_REG;
+ reg_info->shift = QCA8K_LED_PHY123_PATTERN_EN_SHIFT(port_num, led_num);
+ break;
+ case 4:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_get_control_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
+{
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+
+ /* 6 control rules in total:
+ * 3 control rules for phy0-3 that apply to all their LEDs
+ * 3 control rules for phy4
+ */
+ if (port_num == 4)
+ reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
+ else
+ reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
+
+ return 0;
+}
+
+static int
+qca8k_parse_netdev(unsigned long rules, u32 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA8K_LED_TX_BLINK_MASK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA8K_LED_RX_BLINK_MASK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA8K_LED_LINK_10M_EN_MASK;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA8K_LED_LINK_100M_EN_MASK;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA8K_LED_LINK_1000M_EN_MASK;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA8K_LED_HALF_DUPLEX_MASK;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA8K_LED_FULL_DUPLEX_MASK;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ /* Apply some defaults to the requested mode:
+ * - Blink at 4 Hz
+ */
+ *offload_trigger |= QCA8K_LED_BLINK_4HZ;
+
+ return 0;
+}
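+
+/* For instance, a netdev trigger requesting TX and RX activity resolves to
+ * QCA8K_LED_TX_BLINK_MASK | QCA8K_LED_RX_BLINK_MASK | QCA8K_LED_BLINK_4HZ,
+ * while an unsupported rule set leaves offload_trigger empty and returns
+ * -EOPNOTSUPP.
+ */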
+
+static int
+qca8k_led_brightness_set(struct qca8k_led *led,
+ enum led_brightness brightness)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 mask, val;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ val = QCA8K_LED_ALWAYS_OFF;
+ if (brightness)
+ val = QCA8K_LED_ALWAYS_ON;
+
+ /* The HW regs that control brightness are special: ports 1-3
+ * are placed in a different reg.
+ *
+ * To control port 0 brightness:
+ * - the 2 bit (15, 14) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 4:
+ * - the 2 bit (31, 30) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 1:
+ * - the 2 bit at (9, 8) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bit at (11, 10) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bit at (13, 12) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 2:
+ * - the 2 bit at (15, 14) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bit at (17, 16) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bit at (19, 18) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 3:
+ * - the 2 bit at (21, 20) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bit at (23, 22) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bit at (25, 24) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To abstract this and keep the code small, we use the port and LED
+ * number to calculate the shift and the correct reg, since there is
+ * no 1:1 mapping of LEDs to regs.
+ */
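+ /* Worked example (illustrative): port_num = 2, led_num = 1 resolves to
+ * QCA8K_LED_CTRL3_REG with shift ((2 - 1) * 6) + 8 + (2 * 1) = 16, i.e.
+ * the (17, 16) bit pair listed above.
+ */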
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ return regmap_update_bits(priv->regmap, reg_info.reg,
+ mask << reg_info.shift,
+ val << reg_info.shift);
+}
+
+static int
+qca8k_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+
+ return qca8k_led_brightness_set(led, brightness);
+}
+
+static enum led_brightness
+qca8k_led_brightness_get(struct qca8k_led *led)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 val;
+ int ret;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ ret = regmap_read(priv->regmap, reg_info.reg, &val);
+ if (ret)
+ return 0;
+
+ val >>= reg_info.shift;
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ val &= QCA8K_LED_PATTERN_EN_MASK;
+ val >>= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ /* Assume brightness ON only when the LED is set to always ON */
+ return val == QCA8K_LED_ALWAYS_ON;
+}
+
+static int
+qca8k_cled_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ u32 mask, val = QCA8K_LED_ALWAYS_BLINK_4HZ;
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 125;
+ *delay_off = 125;
+ }
+
+ if (*delay_on != 125 || *delay_off != 125) {
+ /* The hardware only supports blinking at 4Hz. Fall back
+ * to software implementation in other cases.
+ */
+ return -EINVAL;
+ }
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
+ val << reg_info.shift);
+
+ return 0;
+}
+
+static int
+qca8k_cled_trigger_offload(struct led_classdev *ldev, bool enable)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 mask, val = QCA8K_LED_ALWAYS_OFF;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ if (enable)
+ val = QCA8K_LED_RULE_CONTROLLED;
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ return regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
+ val << reg_info.shift);
+}
+
+static bool
+qca8k_cled_hw_control_status(struct led_classdev *ldev)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 val;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ regmap_read(priv->regmap, reg_info.reg, &val);
+
+ val >>= reg_info.shift;
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ val &= QCA8K_LED_PATTERN_EN_MASK;
+ val >>= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ return val == QCA8K_LED_RULE_CONTROLLED;
+}
+
+static int
+qca8k_cled_hw_control_is_supported(struct led_classdev *ldev, unsigned long rules)
+{
+ u32 offload_trigger = 0;
+
+ return qca8k_parse_netdev(rules, &offload_trigger);
+}
+
+static int
+qca8k_cled_hw_control_set(struct led_classdev *ldev, unsigned long rules)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 offload_trigger = 0;
+ int ret;
+
+ ret = qca8k_parse_netdev(rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca8k_cled_trigger_offload(ldev, true);
+ if (ret)
+ return ret;
+
+ qca8k_get_control_led_reg(led->port_num, led->led_num, &reg_info);
+
+ return regmap_update_bits(priv->regmap, reg_info.reg,
+ QCA8K_LED_RULE_MASK << reg_info.shift,
+ offload_trigger << reg_info.shift);
+}
+
+static int
+qca8k_cled_hw_control_get(struct led_classdev *ldev, unsigned long *rules)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 val;
+ int ret;
+
+ /* Return an error if HW control is not active */
+ if (!qca8k_cled_hw_control_status(ldev))
+ return -EINVAL;
+
+ qca8k_get_control_led_reg(led->port_num, led->led_num, &reg_info);
+
+ ret = regmap_read(priv->regmap, reg_info.reg, &val);
+ if (ret)
+ return ret;
+
+ val >>= reg_info.shift;
+ val &= QCA8K_LED_RULE_MASK;
+
+ /* Parsing specific to netdev trigger */
+ if (val & QCA8K_LED_TX_BLINK_MASK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA8K_LED_RX_BLINK_MASK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA8K_LED_LINK_10M_EN_MASK)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA8K_LED_LINK_100M_EN_MASK)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA8K_LED_LINK_1000M_EN_MASK)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA8K_LED_HALF_DUPLEX_MASK)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA8K_LED_FULL_DUPLEX_MASK)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static struct device *qca8k_cled_hw_control_get_device(struct led_classdev *ldev)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ struct qca8k_priv *priv = led->priv;
+ struct dsa_port *dp;
+
+ dp = dsa_to_port(priv->ds, qca8k_phy_to_port(led->port_num));
+ if (!dp)
+ return NULL;
+ if (dp->user)
+ return &dp->user->dev;
+ return NULL;
+}
+
+static int
+qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int port_num)
+{
+ struct fwnode_handle *led = NULL, *leds = NULL;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct qca8k_led *port_led;
+ int led_num, led_index;
+ int ret;
+
+ leds = fwnode_get_named_child_node(port, "leds");
+ if (!leds) {
+ dev_dbg(priv->dev, "No Leds node specified in device tree for port %d!\n",
+ port_num);
+ return 0;
+ }
+
+ fwnode_for_each_child_node(leds, led) {
+ /* Reg represents the LED number of the port.
+ * Each port can have at most 3 LEDs attached.
+ * Commonly:
+ * 1. is the gigabit LED
+ * 2. is the mbit LED
+ * 3. is an additional status LED
+ */
+ if (fwnode_property_read_u32(led, "reg", &led_num))
+ continue;
+
+ if (led_num >= QCA8K_LED_PORT_COUNT) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_num, port_num);
+ continue;
+ }
+
+ led_index = QCA8K_LED_PORT_INDEX(port_num, led_num);
+
+ port_led = &priv->ports_led[led_index];
+ port_led->port_num = port_num;
+ port_led->led_num = led_num;
+ port_led->priv = priv;
+
+ state = led_init_default_state_get(led);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ port_led->cdev.brightness = 1;
+ qca8k_led_brightness_set(port_led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ port_led->cdev.brightness =
+ qca8k_led_brightness_get(port_led);
+ break;
+ default:
+ port_led->cdev.brightness = 0;
+ qca8k_led_brightness_set(port_led, 0);
+ }
+
+ port_led->cdev.max_brightness = 1;
+ port_led->cdev.brightness_set_blocking = qca8k_cled_brightness_set_blocking;
+ port_led->cdev.blink_set = qca8k_cled_blink_set;
+ port_led->cdev.hw_control_is_supported = qca8k_cled_hw_control_is_supported;
+ port_led->cdev.hw_control_set = qca8k_cled_hw_control_set;
+ port_led->cdev.hw_control_get = qca8k_cled_hw_control_get;
+ port_led->cdev.hw_control_get_device = qca8k_cled_hw_control_get_device;
+ port_led->cdev.hw_control_trigger = "netdev";
+ init_data.default_label = ":port";
+ init_data.fwnode = led;
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
+ priv->internal_mdio_bus->id,
+ port_num);
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
+ return -ENOMEM;
+ }
+
+ ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
+ if (ret)
+ dev_warn(priv->dev, "Failed to init LED %d for port %d", led_num, port_num);
+
+ kfree(init_data.devicename);
+ }
+
+ fwnode_handle_put(leds);
+ return 0;
+}
+
+int
+qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ struct fwnode_handle *ports, *port;
+ int port_num;
+ int ret;
+
+ ports = device_get_named_child_node(priv->dev, "ports");
+ if (!ports) {
+ dev_info(priv->dev, "No ports node specified in device tree!");
+ return 0;
+ }
+
+ fwnode_for_each_child_node(ports, port) {
+ if (fwnode_property_read_u32(port, "reg", &port_num))
+ continue;
+
+ /* Skip CPU port 0 and CPU port 6, as LEDs are not supported there */
+ if (port_num == 0 || port_num == 6)
+ continue;
+
+ /* Each port can have at most 3 different LEDs attached.
+ * Switch ports run from 0 to 6, but ports 0 and 6 are CPU
+ * ports. The port index needs to be decreased by one to
+ * identify the correct PHY for LED setup.
+ */
+ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
+ return ret;
+ }
+ }
+
+ fwnode_handle_put(ports);
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
new file mode 100644
index 000000000000..d046679265fa
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
+
+#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
+#define QCA8K_MAX_MTU 9000
+#define QCA8K_NUM_LAGS 4
+#define QCA8K_NUM_PORTS_FOR_LAG 4
+
+#define PHY_ID_QCA8327 0x004dd034
+#define QCA8K_ID_QCA8327 0x12
+#define PHY_ID_QCA8337 0x004dd036
+#define QCA8K_ID_QCA8337 0x13
+
+#define QCA8K_QCA832X_MIB_COUNT 39
+#define QCA8K_QCA833X_MIB_COUNT 41
+
+#define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_PORT_VID_DEF 1
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
+#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
+#define QCA8K_REG_PORT5_PAD_CTRL 0x008
+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (the default) to the 148 pin used on QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
+#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL 0x3c
+#define QCA8K_MDIO_MASTER_BUSY BIT(31)
+#define QCA8K_MDIO_MASTER_EN BIT(30)
+#define QCA8K_MDIO_MASTER_READ BIT(27)
+#define QCA8K_MDIO_MASTER_WRITE 0
+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
+#define QCA8K_MDIO_MASTER_MAX_PORTS 5
+#define QCA8K_MDIO_MASTER_MAX_REG 32
+
+/* LED control register */
+#define QCA8K_LED_PORT_COUNT 3
+#define QCA8K_LED_COUNT ((QCA8K_NUM_PORTS - QCA8K_NUM_CPU_PORTS) * QCA8K_LED_PORT_COUNT)
+#define QCA8K_LED_RULE_COUNT 6
+#define QCA8K_LED_RULE_MAX 11
+#define QCA8K_LED_PORT_INDEX(_phy, _led) (((_phy) * QCA8K_LED_PORT_COUNT) + (_led))
+
+#define QCA8K_LED_PHY123_PATTERN_EN_SHIFT(_phy, _led) ((((_phy) - 1) * 6) + 8 + (2 * (_led)))
+#define QCA8K_LED_PHY123_PATTERN_EN_MASK GENMASK(1, 0)
+
+#define QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT 0
+#define QCA8K_LED_PHY4_CONTROL_RULE_SHIFT 16
+
+#define QCA8K_LED_CTRL_REG(_i) (0x050 + (_i) * 4)
+#define QCA8K_LED_CTRL0_REG 0x50
+#define QCA8K_LED_CTRL1_REG 0x54
+#define QCA8K_LED_CTRL2_REG 0x58
+#define QCA8K_LED_CTRL3_REG 0x5C
+#define QCA8K_LED_CTRL_SHIFT(_i) (((_i) % 2) * 16)
+#define QCA8K_LED_CTRL_MASK GENMASK(15, 0)
+#define QCA8K_LED_RULE_MASK GENMASK(13, 0)
+#define QCA8K_LED_BLINK_FREQ_MASK GENMASK(1, 0)
+#define QCA8K_LED_BLINK_FREQ_SHITF 0
+#define QCA8K_LED_BLINK_2HZ 0
+#define QCA8K_LED_BLINK_4HZ 1
+#define QCA8K_LED_BLINK_8HZ 2
+#define QCA8K_LED_BLINK_AUTO 3
+#define QCA8K_LED_LINKUP_OVER_MASK BIT(2)
+#define QCA8K_LED_TX_BLINK_MASK BIT(4)
+#define QCA8K_LED_RX_BLINK_MASK BIT(5)
+#define QCA8K_LED_COL_BLINK_MASK BIT(7)
+#define QCA8K_LED_LINK_10M_EN_MASK BIT(8)
+#define QCA8K_LED_LINK_100M_EN_MASK BIT(9)
+#define QCA8K_LED_LINK_1000M_EN_MASK BIT(10)
+#define QCA8K_LED_POWER_ON_LIGHT_MASK BIT(11)
+#define QCA8K_LED_HALF_DUPLEX_MASK BIT(12)
+#define QCA8K_LED_FULL_DUPLEX_MASK BIT(13)
+#define QCA8K_LED_PATTERN_EN_MASK GENMASK(15, 14)
+#define QCA8K_LED_PATTERN_EN_SHIFT 14
+#define QCA8K_LED_ALWAYS_OFF 0
+#define QCA8K_LED_ALWAYS_BLINK_4HZ 1
+#define QCA8K_LED_ALWAYS_ON 2
+#define QCA8K_LED_RULE_CONTROLLED 3
+
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_MAX_FRAME_SIZE 0x78
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
+#define QCA8K_PORT_STATUS_SPEED_10 0
+#define QCA8K_PORT_STATUS_SPEED_100 0x1
+#define QCA8K_PORT_STATUS_SPEED_1000 0x2
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+#define QCA8K_REG_SGMII_CTRL 0x0e0
+#define QCA8K_SGMII_EN_PLL BIT(1)
+#define QCA8K_SGMII_EN_RX BIT(2)
+#define QCA8K_SGMII_EN_TX BIT(3)
+#define QCA8K_SGMII_EN_SD BIT(4)
+#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
+
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
+/* TRUNK_HASH_EN registers */
+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_ATU_TABLE_SIZE 3 /* 12 bytes wide table / sizeof(u32) */
+
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
+#define QCA8K_REG_VTU_FUNC0 0x610
+#define QCA8K_VTU_FUNC0_VALID BIT(20)
+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
+ * It contains the VLAN_MODE for each port: [5:4] for port0,
+ * [7:6] for port1 ... [17:16] for port6. Use the per-port
+ * defines below to handle this.
+ */
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
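+/* Illustrative example: QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(2) = 4 + 2 * 2 = 8,
+ * so QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(2) places the UNTAG value 0x1 at
+ * bits [9:8] of VTU_FUNC0.
+ */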
+#define QCA8K_REG_VTU_FUNC1 0x614
+#define QCA8K_VTU_FUNC1_BUSY BIT(31)
+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
+#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+/* Up to 4 trunks, one byte each:
+ * bits [6:0] are the member bitmap
+ * bit 7 enables the trunk
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
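+/* Illustrative example: for trunk 1, QCA8K_REG_GOL_TRUNK_SHIFT(1) = 8, so
+ * QCA8K_REG_GOL_TRUNK_EN(1) is BIT(15) and QCA8K_REG_GOL_TRUNK_MEMBER(1)
+ * covers bits [14:8] of TRUNK_CTRL0.
+ */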
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
+/* Complex shift: FIRST shift for port THEN shift for trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
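+/* Illustrative example: trunk 0, member slot 2 gives
+ * QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(0, 2) = 2 * 4 + 0 = 8, so the member's
+ * enable bit sits at bit 11 and its port number at bits [10:8] of
+ * QCA8K_REG_GOL_TRUNK_CTRL(0) (0x704).
+ */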
+
+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
+/* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+enum qca8k_vlan_cmd {
+ QCA8K_VLAN_FLUSH = 1,
+ QCA8K_VLAN_LOAD = 2,
+ QCA8K_VLAN_PURGE = 3,
+ QCA8K_VLAN_REMOVE_PORT = 4,
+ QCA8K_VLAN_NEXT = 5,
+ QCA8K_VLAN_READ = 6,
+};
+
+enum qca8k_mid_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
+
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+};
+
+struct qca8k_match_data {
+ u8 id;
+ bool reduced_package;
+ u8 mib_count;
+ const struct qca8k_info_ops *ops;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed ports */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+};
+
+struct qca8k_mdio_cache {
+/* The 32bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes
+ */
+ u16 page;
+};
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
+struct qca8k_led_pattern_en {
+ u32 reg;
+ u8 shift;
+};
+
+struct qca8k_led {
+ u8 port_num;
+ u8 led_num;
+ u16 old_rule;
+ struct qca8k_priv *priv;
+ struct led_classdev cdev;
+};
+
+struct qca8k_priv {
+ u8 switch_id;
+ u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
+ u8 lag_hash_mode;
+ /* Each bit corresponds to a port. This switch can support a max of 7 ports.
+ * Bit set to 1: port enabled. Bit set to 0: port disabled.
+ */
+ u8 port_enabled_map;
+ u8 port_isolated_map;
+ struct qca8k_ports_config ports_config;
+ struct regmap *regmap;
+ struct mii_bus *bus;
+ struct mii_bus *internal_mdio_bus;
+ struct dsa_switch *ds;
+ struct mutex reg_mutex;
+ struct device *dev;
+ struct gpio_desc *reset_gpio;
+ struct net_device *mgmt_conduit; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
+ struct qca8k_led ports_led[QCA8K_LED_COUNT];
+};
+
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+};
+
+static inline u32 qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
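Editor's note: a hypothetical bounds-checked wrapper (illustrative only, not driver code) showing how the mapping above is meant to be used; per the comment, ports 0 and 6 have no internal PHY, so only ports 1-5 translate to MDIO addresses 0-4.

static int port_to_internal_phy_addr(int port)
{
	if (port < 1 || port > 5)
		return -1; /* no internal PHY on ports 0 and 6 */

	return port - 1; /* same arithmetic as qca8k_port_to_phy() */
}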
+
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/qca/qca8k_leds.h b/drivers/net/dsa/qca/qca8k_leds.h
new file mode 100644
index 000000000000..ab367f05b173
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k_leds.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __QCA8K_LEDS_H
+#define __QCA8K_LEDS_H
+
+/* Leds Support function */
+#ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+int qca8k_setup_led_ctrl(struct qca8k_priv *priv);
+#else
+static inline int qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ return 0;
+}
+#endif
+
+#endif /* __QCA8K_LEDS_H */
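Editor's note: the static inline fallback above is the usual kernel pattern for optional features; callers can invoke qca8k_setup_led_ctrl() unconditionally and the call collapses to "return 0" when CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT is off. A hypothetical caller sketch:

/* Illustrative only: no #ifdef needed at the call site. */
static int example_setup(struct qca8k_priv *priv)
{
	int ret;

	ret = qca8k_setup_led_ctrl(priv); /* no-op stub when LEDs are compiled out */
	if (ret)
		return ret;

	return 0;
}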
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
deleted file mode 100644
index ea7f12778922..000000000000
--- a/drivers/net/dsa/qca8k.c
+++ /dev/null
@@ -1,2198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2016 John Crispin <john@phrozen.org>
- */
-
-#include <linux/module.h>
-#include <linux/phy.h>
-#include <linux/netdevice.h>
-#include <net/dsa.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
-#include <linux/mdio.h>
-#include <linux/phylink.h>
-#include <linux/gpio/consumer.h>
-#include <linux/etherdevice.h>
-
-#include "qca8k.h"
-
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
-};
-
-/* The 32bit switch registers are accessed indirectly. To achieve this we need
- * to set the page of the register. Track the last page that was set to reduce
- * mdio writes
- */
-static u16 qca8k_current_page = 0xffff;
-
-static void
-qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
-{
- regaddr >>= 1;
- *r1 = regaddr & 0x1e;
-
- regaddr >>= 5;
- *r2 = regaddr & 0x7;
-
- regaddr >>= 3;
- *page = regaddr & 0x3ff;
-}
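Editor's note: a standalone sketch (not driver code) of the split above, which is equivalent to page = reg >> 9, r2 = (reg >> 6) & 0x7 and r1 = (reg >> 1) & 0x1e, with one worked example.

#include <stdio.h>

static void split_addr_demo(unsigned int reg)
{
	unsigned int r1   = (reg >> 1) & 0x1e;  /* low register pair   */
	unsigned int r2   = (reg >> 6) & 0x7;   /* MDIO addr low bits  */
	unsigned int page = (reg >> 9) & 0x3ff; /* page register value */

	printf("reg 0x%03x -> page 0x%x, phy 0x%x, reg 0x%x\n",
	       reg, page, 0x10 | r2, r1);
}

int main(void)
{
	split_addr_demo(0x700); /* -> page 0x3, phy 0x14, reg 0x0 */
	return 0;
}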
-
-static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
-{
- int ret;
-
- ret = bus->read(bus, phy_id, regnum);
- if (ret >= 0) {
- *val = ret;
- ret = bus->read(bus, phy_id, regnum + 1);
- *val |= ret << 16;
- }
-
- if (ret < 0) {
- dev_err_ratelimited(&bus->dev,
- "failed to read qca8k 32bit register\n");
- *val = 0;
- return ret;
- }
-
- return 0;
-}
-
-static void
-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
-{
- u16 lo, hi;
- int ret;
-
- lo = val & 0xffff;
- hi = (u16)(val >> 16);
-
- ret = bus->write(bus, phy_id, regnum, lo);
- if (ret >= 0)
- ret = bus->write(bus, phy_id, regnum + 1, hi);
- if (ret < 0)
- dev_err_ratelimited(&bus->dev,
- "failed to write qca8k 32bit register\n");
-}
-
-static int
-qca8k_set_page(struct mii_bus *bus, u16 page)
-{
- int ret;
-
- if (page == qca8k_current_page)
- return 0;
-
- ret = bus->write(bus, 0x18, 0, page);
- if (ret < 0) {
- dev_err_ratelimited(&bus->dev,
- "failed to set qca8k page\n");
- return ret;
- }
-
- qca8k_current_page = page;
- usleep_range(1000, 2000);
- return 0;
-}
-
-static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
-{
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
- return ret;
-}
-
-static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
- return ret;
-}
-
-static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
-{
- struct mii_bus *bus = priv->bus;
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret < 0)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
- if (ret < 0)
- goto exit;
-
- val &= ~mask;
- val |= write_val;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
-exit:
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
-static int
-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, 0, val);
-}
-
-static int
-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, val, 0);
-}
-
-static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_read(priv, reg, val);
-}
-
-static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_write(priv, reg, val);
-}
-
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
-};
-
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
-
-static struct regmap_config qca8k_regmap_config = {
- .reg_bits = 16,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
- .rd_table = &qca8k_readable_table,
-};
-
-static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
-{
- int ret, ret1;
- u32 val;
-
- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- priv, reg, &val);
-
- /* Check if qca8k_read has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
-
- return ret;
-}
-
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[4], val;
- int i, ret;
-
- /* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
-
- reg[i] = val;
- }
-
- /* vid - 83:72 */
- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
- /* aging - 67:64 */
- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
- /* mac - 47:0 */
- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
- fdb->mac[1] = reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
-
- return 0;
-}
-
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
- int i;
-
- /* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
- /* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
- /* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
-
- /* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
-}
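Editor's note: a standalone sketch of the ARL bit layout documented above. The shift values are inferred from the layout comments (vid at entry bits 83:72 lands in reg[2] bits 19:8, and so on); the driver's *_S/_M constants live in the header and are assumed to match.

#include <stdio.h>

static void fdb_pack_demo(void)
{
	const unsigned char mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	unsigned int vid = 1, port_mask = 0x02, aging = 0xf;
	unsigned int reg[3] = { 0 };

	reg[2]  = (vid & 0xfff) << 8;			/* entry bits 83:72 */
	reg[2] |= aging & 0xf;				/* entry bits 67:64 */
	reg[1]  = (port_mask & 0x7f) << 16;		/* entry bits 54:48 */
	reg[1] |= (unsigned int)mac[0] << 8 | mac[1];	/* entry bits 47:32 */
	reg[0]  = (unsigned int)mac[2] << 24 | mac[3] << 16 |
		  mac[4] << 8 | mac[5];			/* entry bits 31:0  */

	/* prints reg2=0x0000010f reg1=0x0002aabb reg0=0xccddeeff */
	printf("reg2=0x%08x reg1=0x%08x reg0=0x%08x\n", reg[2], reg[1], reg[0]);
}

int main(void)
{
	fdb_pack_demo();
	return 0;
}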
-
-static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
-{
- u32 reg;
- int ret;
-
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
- }
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
-
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
-
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
-
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
-
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
-
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /* We do the right thing with VLAN 0 and treat it as untagged while
- * preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
-
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
-
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
-
- if ((reg & mask) != mask) {
- del = false;
- break;
- }
- }
-
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
- } else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
- }
-
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
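Editor's note: both functions above treat each port's egress mode as a 2-bit field inside VTU_FUNC0. The sketch below (standalone; the field position of 4 + 2 * port and the NOT pattern of 0x3 are assumptions matching the header's EG_MODE macros) shows the "last member" test qca8k_vlan_del() performs before purging the entry.

#define EG_MODE_SHIFT(p) (4 + (p) * 2) /* assumed per-port field position */
#define EG_MODE_NOT      0x3           /* assumed "not a member" pattern  */

static int vlan_has_members(unsigned int vtu_func0, int nports)
{
	int p;

	for (p = 0; p < nports; p++)
		if (((vtu_func0 >> EG_MODE_SHIFT(p)) & 0x3) != EG_MODE_NOT)
			return 1; /* at least one port is still a member */

	return 0; /* all ports at EG_MODE_NOT: safe to purge the VLAN */
}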
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
-
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 and 6 have no internal PHY */
- if (port > 0 && port < 6)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
- else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static u32
-qca8k_port_to_phy(int port)
-{
- /* From Andrew Lunn:
- * Port 0 has no internal phy.
- * Port 1 has an internal PHY at MDIO address 0.
- * Port 2 has an internal PHY at MDIO address 1.
- * ...
- * Port 5 has an internal PHY at MDIO address 4.
- * Port 6 has no internal PHY.
- */
-
- return port - 1;
-}
-
-static int
-qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
-{
- u16 r1, r2, page;
- u32 val;
- int ret, ret1;
-
- qca8k_split_addr(reg, &r1, &r2, &page);
-
- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
- QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- bus, 0x10 | r2, r1, &val);
-
- /* Check if qca8k_mii_read32 has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
-
- return ret;
-}
-
-static int
-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
-{
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
- return -EINVAL;
-
- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
- QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
- QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
- QCA8K_MDIO_MASTER_DATA(data);
-
- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY);
-
-exit:
- /* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
-
- mutex_unlock(&bus->mdio_lock);
-
- return ret;
-}
-
-static int
-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
-{
- u16 r1, r2, page;
- u32 val;
- int ret;
-
- if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
- return -EINVAL;
-
- val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
- QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
- QCA8K_MDIO_MASTER_REG_ADDR(regnum);
-
- qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-
- ret = qca8k_set_page(bus, page);
- if (ret)
- goto exit;
-
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
-
- ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
-
-exit:
- /* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
-
- mutex_unlock(&bus->mdio_lock);
-
- if (ret >= 0)
- ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
-
- return ret;
-}
-
-static int
-qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
-{
- struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
-
- return qca8k_mdio_write(bus, phy, regnum, data);
-}
-
-static int
-qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
-{
- struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
-
- return qca8k_mdio_read(bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Use the legacy mapping when the port is not correctly
- * mapped to the right PHY in the devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- return qca8k_mdio_write(priv->bus, port, regnum, data);
-}
-
-static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- /* Use the legacy mapping when the port is not correctly
- * mapped to the right PHY in the devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
-
- ret = qca8k_mdio_read(priv->bus, port, regnum);
-
- if (ret < 0)
- return 0xffff;
-
- return ret;
-}
-
-static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
-{
- struct dsa_switch *ds = priv->ds;
- struct mii_bus *bus;
-
- bus = devm_mdiobus_alloc(ds->dev);
-
- if (!bus)
- return -ENOMEM;
-
- bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
-
- ds->slave_mii_bus = bus;
-
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
-}
-
-static int
-qca8k_setup_mdio_bus(struct qca8k_priv *priv)
-{
- u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
- phy_interface_t mode;
- int err;
-
- ports = of_get_child_by_name(priv->dev->of_node, "ports");
- if (!ports)
- ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
-
- if (!ports)
- return -EINVAL;
-
- for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
- of_node_put(port);
- of_node_put(ports);
- return err;
- }
-
- if (!dsa_is_user_port(priv->ds, reg))
- continue;
-
- of_get_phy_mode(port, &mode);
-
- if (of_property_read_bool(port, "phy-handle") &&
- mode != PHY_INTERFACE_MODE_INTERNAL)
- external_mdio_mask |= BIT(reg);
- else
- internal_mdio_mask |= BIT(reg);
- }
-
- of_node_put(ports);
- if (!external_mdio_mask && !internal_mdio_mask) {
- dev_err(priv->dev, "no PHYs are defined.\n");
- return -EINVAL;
- }
-
- /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
- * the MDIO_MASTER register also _disconnects_ the external MDC
- * passthrough to the internal PHYs. It's not possible to use both
- * configurations at the same time!
- *
- * Because this came up during the review process:
- * If the external mdio-bus driver is capable magically disabling
- * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
- * accessors for the time being, it would be possible to pull this
- * off.
- */
- if (!!external_mdio_mask && !!internal_mdio_mask) {
- dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
- return -EINVAL;
- }
-
- if (external_mdio_mask) {
- /* Make sure to disable the internal mdio bus in case
- * a dt-overlay or driver reload changed the configuration
- */
-
- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
- }
-
- /* Check if the devicetree declares the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found, fall back to the legacy
- * mapping based on the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
-}
-
-static int
-qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
-{
- u32 mask = 0;
- int ret = 0;
-
- /* SoC specific settings for ipq8064.
- * If more devices require this, consider adding
- * a dedicated binding.
- */
- if (of_machine_is_compatible("qcom,ipq8064"))
- mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
-
- /* SoC specific settings for ipq8065 */
- if (of_machine_is_compatible("qcom,ipq8065"))
- mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
-
- if (mask) {
- ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
- QCA8K_MAC_PWR_RGMII0_1_8V |
- QCA8K_MAC_PWR_RGMII1_1_8V,
- mask);
- }
-
- return ret;
-}
-
-static int qca8k_find_cpu_port(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Find the connected cpu port. Valid ports are 0 and 6 */
- if (dsa_is_cpu_port(ds, 0))
- return 0;
-
- dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
-
- if (dsa_is_cpu_port(ds, 6))
- return 6;
-
- return -EINVAL;
-}
-
-static int
-qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
-{
- struct device_node *node = priv->dev->of_node;
- const struct qca8k_match_data *data;
- u32 val = 0;
- int ret;
-
- /* The QCA8327 requires the correct package mode to be set.
- * Its bigger brother, the QCA8328, has the 172 pin layout.
- * This should be applied by default, but we set it just to make sure.
- */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- data = of_device_get_match_data(priv->dev);
-
- /* Set the correct package of 148 pin for QCA8327 */
- if (data->reduced_package)
- val |= QCA8327_PWS_PACKAGE148_EN;
-
- ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
- val);
- if (ret)
- return ret;
- }
-
- if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
- val |= QCA8K_PWS_POWER_ON_SEL;
-
- if (of_property_read_bool(node, "qca,led-open-drain")) {
- if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
- dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
- return -EINVAL;
- }
-
- val |= QCA8K_PWS_LED_OPEN_EN_CSR;
- }
-
- return qca8k_rmw(priv, QCA8K_REG_PWS,
- QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
- val);
-}
-
-static int
-qca8k_parse_port_config(struct qca8k_priv *priv)
-{
- int port, cpu_port_index = -1, ret;
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 delay;
-
- /* We have 2 CPU ports. Check them */
- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
- /* Skip every other port */
- if (port != 0 && port != 6)
- continue;
-
- dp = dsa_to_port(priv->ds, port);
- port_dn = dp->dn;
- cpu_port_index++;
-
- if (!of_device_is_available(port_dn))
- continue;
-
- ret = of_get_phy_mode(port_dn, &mode);
- if (ret)
- continue;
-
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_SGMII:
- delay = 0;
-
- if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
- /* Switch regs accept value in ns, convert ps to ns */
- delay = delay / 1000;
- else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_TXID)
- delay = 1;
-
- if (delay > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-
- priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
-
- delay = 0;
-
- if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
- /* Switch regs accept value in ns, convert ps to ns */
- delay = delay / 1000;
- else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_RXID)
- delay = 2;
-
- if (delay > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- delay = 3;
- }
-
- priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
-
- /* Skip sgmii parsing for rgmii* mode */
- if (mode == PHY_INTERFACE_MODE_RGMII ||
- mode == PHY_INTERFACE_MODE_RGMII_ID ||
- mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- mode == PHY_INTERFACE_MODE_RGMII_RXID)
- break;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
- priv->ports_config.sgmii_tx_clk_falling_edge = true;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
- priv->ports_config.sgmii_rx_clk_falling_edge = true;
-
- if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
- priv->ports_config.sgmii_enable_pll = true;
-
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
- priv->ports_config.sgmii_enable_pll = false;
- }
-
- if (priv->switch_revision < 2)
- dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with a revision lower than 2.");
- }
-
- break;
- default:
- continue;
- }
- }
-
- return 0;
-}
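Editor's note: a small standalone sketch of the ps -> ns conversion and clamp applied above; e.g. a DT value of "tx-internal-delay-ps = <2000>" becomes 2 ns, and anything above the 3 ns hardware max is clamped to 3.

/* Illustrative only; mirrors the parse logic above. */
static unsigned int delay_ps_to_ns(unsigned int ps)
{
	unsigned int ns = ps / 1000; /* switch regs accept whole ns */

	return ns > 3 ? 3 : ns;      /* max supported delay is 3 ns */
}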
-
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, ret, i;
- u32 mask;
-
- cpu_port = qca8k_find_cpu_port(ds);
- if (cpu_port < 0) {
- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
- return cpu_port;
- }
-
- /* Parse CPU port config to be later used in phy_link mac_config */
- ret = qca8k_parse_port_config(priv);
- if (ret)
- return ret;
-
- mutex_init(&priv->reg_mutex);
-
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
- &qca8k_regmap_config);
- if (IS_ERR(priv->regmap))
- dev_warn(priv->dev, "regmap initialization failed");
-
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_of_pws_reg(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mac_pwr_sel(priv);
- if (ret)
- return ret;
-
- /* Enable CPU Port */
- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "mib init failed");
-
- /* Initial setup of all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
-
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
- }
-
- /* Disable MAC by default on all user ports */
- if (dsa_is_user_port(ds, i))
- qca8k_port_set_status(priv, i, 0);
- }
-
- /* Forward all unknown frames to the CPU port for Linux processing.
- * Notice that in a multi-cpu config only one port should be set
- * for igmp, unknown, multicast and broadcast packets
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports
- * Configure specific switch configuration for ports
- */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
- int shift = 16 * (i % 2);
-
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port-based VLANs to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- /* Port 5 of the qca8337 has some problems in flood conditions. The
- * original legacy driver had some specific buffer and priority settings
- * for the different ports, suggested by the QCA switch team. Add these
- * missing settings to improve switch stability under load conditions.
- * This problem is limited to the qca8337; other qca8k switches are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- switch (i) {
- /* The 2 CPU ports and port 5 require different
- * priorities than the other ports.
- */
- case 0:
- case 5:
- case 6:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
- break;
- default:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
- }
- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
- mask);
- }
-
- /* Set the initial MTU for every port.
- * We only have a global MTU setting, so track
- * every port and set the max across all ports.
- */
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
- mask);
- }
-
- /* Setup our port MTUs to match power on defaults */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- return 0;
-}
-
-static void
-qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
- u32 reg)
-{
- u32 delay, val = 0;
- int ret;
-
- /* The delay can be declared in 3 different ways:
- * an rgmii mode together with the standard internal-delay
- * binding, or an rgmii-id / rgmii-txid / rgmii-rxid phy mode.
- * The parse logic sets a delay different than 0 only when one
- * of these forms is used; in all other cases the delay is
- * not enabled. With ID or TX/RXID the delay is enabled and set
- * to the default, recommended value.
- */
- if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
- delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
-
- val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
- }
-
- if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
- delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
-
- val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
- }
-
- /* Set RGMII delay based on the selected values */
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
- val);
- if (ret)
- dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
- cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
-}
-
-static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- int cpu_port_index, ret;
- u32 reg, val;
-
- switch (port) {
- case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- return;
-
- reg = QCA8K_REG_PORT0_PAD_CTRL;
- cpu_port_index = QCA8K_CPU_PORT0;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- /* Internal PHY, nothing to do */
- return;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- return;
-
- reg = QCA8K_REG_PORT6_PAD_CTRL;
- cpu_port_index = QCA8K_CPU_PORT6;
- break;
- default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
- return;
- }
-
- if (port != 6 && phylink_autoneg_inband(mode)) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
-
- /* Configure rgmii delay */
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* The QCA8337 requires the rgmii rx delay to be set for all ports.
- * This is enabled through PORT5_PAD_CTRL for all ports,
- * rather than individual port registers.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337)
- qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- break;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- /* Enable SGMII on the port */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-
- /* Enable/disable SerDes auto-negotiation as necessary */
- ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
- if (ret)
- return;
- if (phylink_autoneg_inband(mode))
- val &= ~QCA8K_PWS_SERDES_AEN_DIS;
- else
- val |= QCA8K_PWS_SERDES_AEN_DIS;
- qca8k_write(priv, QCA8K_REG_PWS, val);
-
- /* Configure the SGMII parameters */
- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
- if (ret)
- return;
-
- val |= QCA8K_SGMII_EN_SD;
-
- if (priv->ports_config.sgmii_enable_pll)
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX;
-
- if (dsa_is_cpu_port(ds, port)) {
- /* CPU port, we're talking to the CPU MAC, be a PHY */
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_PHY;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_MAC;
- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_BASEX;
- }
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
- /* For qca8327/qca8328/qca8334/qca8338 the sgmii block is unique and
- * the falling edge is configured by writing the PORT0 PAD reg
- */
- if (priv->switch_id == QCA8K_ID_QCA8327 ||
- priv->switch_id == QCA8K_ID_QCA8337)
- reg = QCA8K_REG_PORT0_PAD_CTRL;
-
- val = 0;
-
- /* SGMII Clock phase configuration */
- if (priv->ports_config.sgmii_rx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
- if (priv->ports_config.sgmii_tx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
- if (val)
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
- /* The original code reports port instability, as SGMII also
- * requires the delay to be set. Apply the advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- break;
- default:
- dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-}
-
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (port) {
- case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- goto unsupported;
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- /* Internal PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- goto unsupported;
- break;
- default:
-unsupported:
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- phylink_set(mask, 1000baseX_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
-
- return 1;
-}
-
-static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- struct qca8k_priv *priv = ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
-}
-
-static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
-
- if (phylink_autoneg_inband(mode)) {
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- } else {
- switch (speed) {
- case SPEED_10:
- reg = QCA8K_PORT_STATUS_SPEED_10;
- break;
- case SPEED_100:
- reg = QCA8K_PORT_STATUS_SPEED_100;
- break;
- case SPEED_1000:
- reg = QCA8K_PORT_STATUS_SPEED_1000;
- break;
- default:
- reg = QCA8K_PORT_STATUS_LINK_AUTO;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= QCA8K_PORT_STATUS_DUPLEX;
-
- if (rx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_RXFLOW;
-
- if (tx_pause || dsa_is_cpu_port(ds, port))
- reg |= QCA8K_PORT_STATUS_TXFLOW;
- }
-
- reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
-}
-
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
-}
-
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
-
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
-
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
- }
-}
-
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
-{
- if (sset != ETH_SS_STATS)
- return 0;
-
- return ARRAY_SIZE(ar8327_mib);
-}
-
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
- if (ret < 0)
- goto exit;
-
- if (eee->eee_enabled)
- reg |= lpi_en;
- else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
-
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
-
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
-
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
-}
-
-static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
-
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
-}
-
-static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Remove this port from the portvlan mask of the other ports
- * in the bridge
- */
- qca8k_reg_clear(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
-
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
-}
-
-static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
-
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
-
- return 0;
-}
-
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
-}
-
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
-
- priv->port_mtu[port] = new_mtu;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
-
- /* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
-}
-
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
-}
-
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
-{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
-}
-
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
-
- return qca8k_fdb_del(priv, addr, port_mask, vid);
-}
-
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
-
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
- }
- mutex_unlock(&priv->reg_mutex);
-
- return 0;
-}
-
-static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
- if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
- return ret;
- }
-
- if (pvid) {
- int shift = 16 * (port % 2);
-
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
- }
-
- return ret;
-}
-
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- ret = qca8k_vlan_del(priv, port, vlan->vid);
- if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
-
- return ret;
-}
-
-static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
-
- /* Communicate the switch revision to the internal PHY driver.
-  * Depending on the switch revision, different values need to be
-  * written to the dbg and mmd registers on the PHY.
-  * The first 2 bits are used to communicate the switch revision
-  * to the PHY driver.
-  */
- if (port > 0 && port < 6)
- return priv->switch_revision;
-
- return 0;
-}
-
-static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_QCA;
-}
-
-static const struct dsa_switch_ops qca8k_switch_ops = {
- .get_tag_protocol = qca8k_get_tag_protocol,
- .setup = qca8k_setup,
- .get_strings = qca8k_get_strings,
- .get_ethtool_stats = qca8k_get_ethtool_stats,
- .get_sset_count = qca8k_get_sset_count,
- .get_mac_eee = qca8k_get_mac_eee,
- .set_mac_eee = qca8k_set_mac_eee,
- .port_enable = qca8k_port_enable,
- .port_disable = qca8k_port_disable,
- .port_change_mtu = qca8k_port_change_mtu,
- .port_max_mtu = qca8k_port_max_mtu,
- .port_stp_state_set = qca8k_port_stp_state_set,
- .port_bridge_join = qca8k_port_bridge_join,
- .port_bridge_leave = qca8k_port_bridge_leave,
- .port_fdb_add = qca8k_port_fdb_add,
- .port_fdb_del = qca8k_port_fdb_del,
- .port_fdb_dump = qca8k_port_fdb_dump,
- .port_vlan_filtering = qca8k_port_vlan_filtering,
- .port_vlan_add = qca8k_port_vlan_add,
- .port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
- .get_phy_flags = qca8k_get_phy_flags,
-};
-
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
- const struct qca8k_match_data *data;
- u32 val;
- u8 id;
- int ret;
-
- /* Get the switch's ID from the compatible string */
- data = of_device_get_match_data(priv->dev);
- if (!data)
- return -ENODEV;
-
- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
- if (ret < 0)
- return -ENODEV;
-
- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
- }
-
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
-
- return 0;
-}
-
-static int
-qca8k_sw_probe(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv;
- int ret;
-
- /* Allocate the private data struct so that we can probe the
-  * switch's ID register
-  */
- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
-
- priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
- GPIOD_ASIS);
- if (IS_ERR(priv->reset_gpio))
- return PTR_ERR(priv->reset_gpio);
-
- if (priv->reset_gpio) {
- gpiod_set_value_cansleep(priv->reset_gpio, 1);
- /* The active low duration must be greater than 10 ms
- * and checkpatch.pl wants 20 ms.
- */
- msleep(20);
- gpiod_set_value_cansleep(priv->reset_gpio, 0);
- }
-
- /* Check the detected switch id */
- ret = qca8k_read_switch_id(priv);
- if (ret)
- return ret;
-
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = QCA8K_NUM_PORTS;
- priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
- mutex_init(&priv->reg_mutex);
- dev_set_drvdata(&mdiodev->dev, priv);
-
- return dsa_register_switch(priv->ds);
-}
-
-static void
-qca8k_sw_remove(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
- int i;
-
- if (!priv)
- return;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
-
- dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
-
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void
-qca8k_set_pm(struct qca8k_priv *priv, int enable)
-{
- int i;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
- continue;
-
- qca8k_port_set_status(priv, i, enable);
- }
-}
-
-static int qca8k_suspend(struct device *dev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(dev);
-
- qca8k_set_pm(priv, 0);
-
- return dsa_switch_suspend(priv->ds);
-}
-
-static int qca8k_resume(struct device *dev)
-{
- struct qca8k_priv *priv = dev_get_drvdata(dev);
-
- qca8k_set_pm(priv, 1);
-
- return dsa_switch_resume(priv->ds);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
- qca8k_suspend, qca8k_resume);
-
-static const struct qca8k_match_data qca8327 = {
- .id = QCA8K_ID_QCA8327,
- .reduced_package = true,
-};
-
-static const struct qca8k_match_data qca8328 = {
- .id = QCA8K_ID_QCA8327,
-};
-
-static const struct qca8k_match_data qca833x = {
- .id = QCA8K_ID_QCA8337,
-};
-
-static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca8327 },
- { .compatible = "qca,qca8328", .data = &qca8328 },
- { .compatible = "qca,qca8334", .data = &qca833x },
- { .compatible = "qca,qca8337", .data = &qca833x },
- { /* sentinel */ },
-};
-
-static struct mdio_driver qca8kmdio_driver = {
- .probe = qca8k_sw_probe,
- .remove = qca8k_sw_remove,
- .shutdown = qca8k_sw_shutdown,
- .mdiodrv.driver = {
- .name = "qca8k",
- .of_match_table = qca8k_of_match,
- .pm = &qca8k_pm_ops,
- },
-};
-
-mdio_module_driver(qca8kmdio_driver);
-
-MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
-MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:qca8k");
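
The PVID path in qca8k_port_vlan_add() above relies on two ports sharing each 32-bit QCA8K_EGRESS_VLAN register, which is why the field shift is 16 * (port % 2) and the register offset advances one word per port pair. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define QCA8K_EGRESS_VLAN(x)	(0x0c70 + 4 * ((x) / 2))

	/* Even ports use bits 11:0 of the shared register, odd ports bits 27:16 */
	static void pvid_reg_layout(int port, uint16_t vid)
	{
		int shift = 16 * (port % 2);

		printf("port %d: reg 0x%04x, mask 0x%08x, val 0x%08x\n",
		       port, QCA8K_EGRESS_VLAN(port),
		       0xfffu << shift, (uint32_t)vid << shift);
	}

	int main(void)
	{
		pvid_reg_layout(2, 100);	/* ports 2 and 3 share register 0x0c74 */
		pvid_reg_layout(3, 100);
		return 0;
	}
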
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
deleted file mode 100644
index e10571a398c9..000000000000
--- a/drivers/net/dsa/qca8k.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __QCA8K_H
-#define __QCA8K_H
-
-#include <linux/delay.h>
-#include <linux/regmap.h>
-#include <linux/gpio.h>
-
-#define QCA8K_NUM_PORTS 7
-#define QCA8K_NUM_CPU_PORTS 2
-#define QCA8K_MAX_MTU 9000
-
-#define PHY_ID_QCA8327 0x004dd034
-#define QCA8K_ID_QCA8327 0x12
-#define PHY_ID_QCA8337 0x004dd036
-#define QCA8K_ID_QCA8337 0x13
-
-#define QCA8K_BUSY_WAIT_TIMEOUT 2000
-
-#define QCA8K_NUM_FDB_RECORDS 2048
-
-#define QCA8K_PORT_VID_DEF 1
-
-/* Global control registers */
-#define QCA8K_REG_MASK_CTRL 0x000
-#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
-#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
-#define QCA8K_REG_PORT0_PAD_CTRL 0x004
-#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
-#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
-#define QCA8K_REG_PORT5_PAD_CTRL 0x008
-#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
-#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
-#define QCA8K_MAX_DELAY 3
-#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
-#define QCA8K_REG_PWS 0x010
-#define QCA8K_PWS_POWER_ON_SEL BIT(31)
-/* This reg is only valid for QCA832x and toggles the package
- * type from 176-pin (the default) to the 148-pin package used on QCA8327
- */
-#define QCA8327_PWS_PACKAGE148_EN BIT(30)
-#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
-#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
-#define QCA8K_REG_MODULE_EN 0x030
-#define QCA8K_MODULE_EN_MIB BIT(0)
-#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
-#define QCA8K_MIB_CPU_KEEP BIT(20)
-#define QCA8K_MIB_BUSY BIT(17)
-#define QCA8K_MDIO_MASTER_CTRL 0x3c
-#define QCA8K_MDIO_MASTER_BUSY BIT(31)
-#define QCA8K_MDIO_MASTER_EN BIT(30)
-#define QCA8K_MDIO_MASTER_READ BIT(27)
-#define QCA8K_MDIO_MASTER_WRITE 0
-#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
-#define QCA8K_MDIO_MASTER_DATA(x) (x)
-#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
-#define QCA8K_MDIO_MASTER_MAX_PORTS 5
-#define QCA8K_MDIO_MASTER_MAX_REG 32
-#define QCA8K_GOL_MAC_ADDR0 0x60
-#define QCA8K_GOL_MAC_ADDR1 0x64
-#define QCA8K_MAX_FRAME_SIZE 0x78
-#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
-#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
-#define QCA8K_PORT_STATUS_SPEED_10 0
-#define QCA8K_PORT_STATUS_SPEED_100 0x1
-#define QCA8K_PORT_STATUS_SPEED_1000 0x2
-#define QCA8K_PORT_STATUS_TXMAC BIT(2)
-#define QCA8K_PORT_STATUS_RXMAC BIT(3)
-#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
-#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
-#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
-#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
-#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
-#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
-#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
-#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
-#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
-#define QCA8K_PORT_HDR_CTRL_RX_S 2
-#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
-#define QCA8K_PORT_HDR_CTRL_TX_S 0
-#define QCA8K_PORT_HDR_CTRL_ALL 2
-#define QCA8K_PORT_HDR_CTRL_MGMT 1
-#define QCA8K_PORT_HDR_CTRL_NONE 0
-#define QCA8K_REG_SGMII_CTRL 0x0e0
-#define QCA8K_SGMII_EN_PLL BIT(1)
-#define QCA8K_SGMII_EN_RX BIT(2)
-#define QCA8K_SGMII_EN_TX BIT(3)
-#define QCA8K_SGMII_EN_SD BIT(4)
-#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
-
-/* MAC_PWR_SEL registers */
-#define QCA8K_REG_MAC_PWR_SEL 0x0e4
-#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
-#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
-
-/* EEE control registers */
-#define QCA8K_REG_EEE_CTRL 0x100
-#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
-
-/* ACL registers */
-#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
-#define QCA8K_PORT_VLAN_SVID(x) x
-#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
-#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
-#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
-
-/* Lookup registers */
-#define QCA8K_REG_ATU_DATA0 0x600
-#define QCA8K_ATU_ADDR2_S 24
-#define QCA8K_ATU_ADDR3_S 16
-#define QCA8K_ATU_ADDR4_S 8
-#define QCA8K_REG_ATU_DATA1 0x604
-#define QCA8K_ATU_PORT_M 0x7f
-#define QCA8K_ATU_PORT_S 16
-#define QCA8K_ATU_ADDR0_S 8
-#define QCA8K_REG_ATU_DATA2 0x608
-#define QCA8K_ATU_VID_M 0xfff
-#define QCA8K_ATU_VID_S 8
-#define QCA8K_ATU_STATUS_M 0xf
-#define QCA8K_ATU_STATUS_STATIC 0xf
-#define QCA8K_REG_ATU_FUNC 0x60c
-#define QCA8K_ATU_FUNC_BUSY BIT(31)
-#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
-#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
-#define QCA8K_ATU_FUNC_FULL BIT(12)
-#define QCA8K_ATU_FUNC_PORT_M 0xf
-#define QCA8K_ATU_FUNC_PORT_S 8
-#define QCA8K_REG_VTU_FUNC0 0x610
-#define QCA8K_VTU_FUNC0_VALID BIT(20)
-#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
-#define QCA8K_REG_VTU_FUNC1 0x614
-#define QCA8K_VTU_FUNC1_BUSY BIT(31)
-#define QCA8K_VTU_FUNC1_VID_S 16
-#define QCA8K_VTU_FUNC1_FULL BIT(4)
-#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
-#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
-#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
-#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
-#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
-#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
-
-#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
-
-#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
-
-#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
-#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
-#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
-#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
-
-/* Pkt edit registers */
-#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
-
-/* L3 registers */
-#define QCA8K_HROUTER_CONTROL 0xe00
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
-#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
-#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
-#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
-#define QCA8K_HNAT_CONTROL 0xe38
-
-/* MIB registers */
-#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
-
-/* QCA specific MII registers */
-#define MII_ATH_MMD_ADDR 0x0d
-#define MII_ATH_MMD_DATA 0x0e
-
-enum {
- QCA8K_PORT_SPEED_10M = 0,
- QCA8K_PORT_SPEED_100M = 1,
- QCA8K_PORT_SPEED_1000M = 2,
- QCA8K_PORT_SPEED_ERR = 3,
-};
-
-enum qca8k_fdb_cmd {
- QCA8K_FDB_FLUSH = 1,
- QCA8K_FDB_LOAD = 2,
- QCA8K_FDB_PURGE = 3,
- QCA8K_FDB_NEXT = 6,
- QCA8K_FDB_SEARCH = 7,
-};
-
-enum qca8k_vlan_cmd {
- QCA8K_VLAN_FLUSH = 1,
- QCA8K_VLAN_LOAD = 2,
- QCA8K_VLAN_PURGE = 3,
- QCA8K_VLAN_REMOVE_PORT = 4,
- QCA8K_VLAN_NEXT = 5,
- QCA8K_VLAN_READ = 6,
-};
-
-struct ar8xxx_port_status {
- int enabled;
-};
-
-struct qca8k_match_data {
- u8 id;
- bool reduced_package;
-};
-
-enum {
- QCA8K_CPU_PORT0,
- QCA8K_CPU_PORT6,
-};
-
-struct qca8k_ports_config {
- bool sgmii_rx_clk_falling_edge;
- bool sgmii_tx_clk_falling_edge;
- bool sgmii_enable_pll;
- u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
- u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
-};
-
-struct qca8k_priv {
- u8 switch_id;
- u8 switch_revision;
- bool legacy_phy_port_mapping;
- struct qca8k_ports_config ports_config;
- struct regmap *regmap;
- struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
- struct dsa_switch *ds;
- struct mutex reg_mutex;
- struct device *dev;
- struct dsa_switch_ops ops;
- struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
-};
-
-struct qca8k_mib_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-struct qca8k_fdb {
- u16 vid;
- u8 port_mask;
- u8 aging;
- u8 mac[6];
-};
-
-#endif /* __QCA8K_H */
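
The ATU_DATA0/1/2 shift macros above encode how a 6-byte MAC address, port mask, VID and aging status are packed for an FDB operation. A sketch of that packing, assuming the layout the shift macros suggest (a standalone illustration, not the driver's own helper):

	#include <stdint.h>

	/* Assumed layout per the QCA8K_ATU_* macros: MAC bytes 2..5 in
	 * ATU_DATA0, port mask and MAC bytes 0..1 in ATU_DATA1, VID and
	 * aging status in ATU_DATA2.
	 */
	static void atu_pack(const uint8_t mac[6], uint8_t port_mask,
			     uint16_t vid, uint8_t aging, uint32_t reg[3])
	{
		reg[0] = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
			 ((uint32_t)mac[4] << 8) | mac[5];
		reg[1] = ((uint32_t)(port_mask & 0x7f) << 16) |
			 ((uint32_t)mac[0] << 8) | mac[1];
		reg[2] = ((uint32_t)(vid & 0xfff) << 8) | (aging & 0xf);
	}
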
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
deleted file mode 100644
index c66ebd0ee217..000000000000
--- a/drivers/net/dsa/realtek-smi-core.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Realtek Simple Management Interface (SMI) driver
- * It can be discussed how "simple" this interface is.
- *
- * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
- * but the protocol is not MDIO at all. Instead it is a Realtek
- * peculiarity that needs to bit-bang the lines in a special way to
- * communicate with the switch.
- *
- * ASICs we intend to support with this driver:
- *
- * RTL8366 - The original version, apparently
- * RTL8369 - Similar enough to have the same datasheet as RTL8366
- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
- * different register layout from the other two
- * RTL8366S - Is this "RTL8366 super"?
- * RTL8367 - Has an OpenWRT driver as well
- * RTL8368S - Seems to be an alternative name for RTL8366RB
- * RTL8370 - Also uses SMI
- *
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
- * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
- * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/bitops.h>
-#include <linux/if_bridge.h>
-
-#include "realtek-smi-core.h"
-
-#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
-
-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
-{
- ndelay(smi->clk_delay);
-}
-
-static void realtek_smi_start(struct realtek_smi *smi)
-{
- /* Set GPIO pins to output mode, with initial state:
- * SCK = 0, SDA = 1
- */
- gpiod_direction_output(smi->mdc, 0);
- gpiod_direction_output(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
-
- /* CLK 1: 0 -> 1, 1 -> 0 */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
-
- /* CLK 2: */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
-}
-
-static void realtek_smi_stop(struct realtek_smi *smi)
-{
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Add a click */
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Set GPIO pins to input mode */
- gpiod_direction_input(smi->mdio);
- gpiod_direction_input(smi->mdc);
-}
-
-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
-{
- for (; len > 0; len--) {
- realtek_smi_clk_delay(smi);
-
- /* Prepare data */
- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- }
-}
-
-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
-{
- gpiod_direction_input(smi->mdio);
-
- for (*data = 0; len > 0; len--) {
- u32 u;
-
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- u = !!gpiod_get_value(smi->mdio);
- gpiod_set_value(smi->mdc, 0);
-
- *data |= (u << (len - 1));
- }
-
- gpiod_direction_output(smi->mdio, 0);
-}
-
-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
-{
- int retry_cnt;
-
- retry_cnt = 0;
- do {
- u32 ack;
-
- realtek_smi_read_bits(smi, 1, &ack);
- if (ack == 0)
- break;
-
- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
- dev_err(smi->dev, "ACK timeout\n");
- return -ETIMEDOUT;
- }
- } while (1);
-
- return 0;
-}
-
-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return realtek_smi_wait_for_ack(smi);
-}
-
-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return 0;
-}
-
-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x00, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x01, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
-{
- unsigned long flags;
- u8 lo = 0;
- u8 hi = 0;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send READ command */
- ret = realtek_smi_write_byte(smi, smi->cmd_read);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Read DATA[7:0] */
- realtek_smi_read_byte0(smi, &lo);
- /* Read DATA[15:8] */
- realtek_smi_read_byte1(smi, &hi);
-
- *data = ((u32)lo) | (((u32)hi) << 8);
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-static int realtek_smi_write_reg(struct realtek_smi *smi,
- u32 addr, u32 data, bool ack)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send WRITE command */
- ret = realtek_smi_write_byte(smi, smi->cmd_write);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Write DATA[7:0] */
- ret = realtek_smi_write_byte(smi, data & 0xff);
- if (ret)
- goto out;
-
- /* Write DATA[15:8] */
- if (ack)
- ret = realtek_smi_write_byte(smi, data >> 8);
- else
- ret = realtek_smi_write_byte_noack(smi, data >> 8);
- if (ret)
- goto out;
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-/* There is one single case when we need to use this accessor and that
- * is when issuing a soft reset. Since the device resets as soon as we write
- * that bit, no ACK will come back for natural reasons.
- */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data)
-{
- return realtek_smi_write_reg(smi, addr, data, false);
-}
-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
-
-/* Regmap accessors */
-
-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_write_reg(smi, reg, val, true);
-}
-
-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_read_reg(smi, reg, val);
-}
-
-static const struct regmap_config realtek_smi_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_read(smi, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_write(smi, addr, regnum, val);
-}
-
-int realtek_smi_setup_mdio(struct realtek_smi *smi)
-{
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(smi->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- smi->slave_mii_bus->priv = smi;
- smi->slave_mii_bus->name = "SMI slave MII";
- smi->slave_mii_bus->read = realtek_smi_mdio_read;
- smi->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- smi->ds->index);
- smi->slave_mii_bus->dev.of_node = mdio_np;
- smi->slave_mii_bus->parent = smi->dev;
- smi->ds->slave_mii_bus = smi->slave_mii_bus;
-
- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
- if (ret) {
- dev_err(smi->dev, "unable to register MDIO bus %s\n",
- smi->slave_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
-{
- const struct realtek_smi_variant *var;
- struct device *dev = &pdev->dev;
- struct realtek_smi *smi;
- struct device_node *np;
- int ret;
-
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
- if (!smi)
- return -ENOMEM;
- smi->chip_data = (void *)smi + sizeof(*smi);
- smi->map = devm_regmap_init(dev, NULL, smi,
- &realtek_smi_mdio_regmap_config);
- if (IS_ERR(smi->map)) {
- ret = PTR_ERR(smi->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- smi->dev = dev;
- smi->clk_delay = var->clk_delay;
- smi->cmd_read = var->cmd_read;
- smi->cmd_write = var->cmd_write;
- smi->ops = var->ops;
-
- dev_set_drvdata(dev, smi);
- spin_lock_init(&smi->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- /* Assert then deassert RESET */
- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(smi->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(smi->reset);
- }
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(smi->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
-
- /* Fetch MDIO pins */
- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdc))
- return PTR_ERR(smi->mdc);
- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdio))
- return PTR_ERR(smi->mdio);
-
- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = smi->ops->detect(smi);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
- if (!smi->ds)
- return -ENOMEM;
-
- smi->ds->dev = dev;
- smi->ds->num_ports = smi->num_ports;
- smi->ds->priv = smi;
-
- smi->ds->ops = var->ds_ops;
- ret = dsa_register_switch(smi->ds);
- if (ret) {
- dev_err(dev, "unable to register switch ret = %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int realtek_smi_remove(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return 0;
-
- dsa_unregister_switch(smi->ds);
- if (smi->slave_mii_bus)
- of_node_put(smi->slave_mii_bus->dev.of_node);
- gpiod_set_value(smi->reset, 1);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void realtek_smi_shutdown(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return;
-
- dsa_switch_shutdown(smi->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct of_device_id realtek_smi_of_match[] = {
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
- },
- .probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_LICENSE("GPL");
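
realtek_smi_write_bits() above shifts data out MSB-first: bit (len - 1) of the value hits the wire first and bit 0 last. A tiny standalone illustration of that ordering, printing bits instead of wiggling GPIOs:

	#include <stdint.h>
	#include <stdio.h>

	/* Same bit ordering as realtek_smi_write_bits(): most significant
	 * of the 'len' bits first, ending with bit 0.
	 */
	static void smi_bit_order(uint32_t data, uint32_t len)
	{
		for (; len > 0; len--)
			putchar(data & (1u << (len - 1)) ? '1' : '0');
		putchar('\n');
	}

	int main(void)
	{
		smi_bit_order(0xb1, 8);	/* arbitrary command byte: prints 10110001 */
		return 0;
	}
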
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000000..d6eb6713e5f6
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
+config NET_DSA_REALTEK_MDIO
+ bool "Realtek MDIO interface support"
+ depends on OF
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ bool "Realtek SMI interface support"
+ depends on OF
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+ Select to enable support for Realtek RTL8366RB.
+
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
+endif
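
As the help texts note, the tristate subdrivers are only useful when at least one of the bool interface options is also enabled. For example, a configuration building everything as modules with only the MDIO interface compiled in might look like this (illustrative fragment, not a recommendation):

	CONFIG_NET_DSA_REALTEK=m
	CONFIG_NET_DSA_REALTEK_MDIO=y
	# CONFIG_NET_DSA_REALTEK_SMI is not set
	CONFIG_NET_DSA_REALTEK_RTL8366RB=m
	CONFIG_NET_DSA_REALTEK_RTL8365MB=m
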
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000000..17367bcba496
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK) += realtek_dsa.o
+realtek_dsa-objs := rtl83xx.o
+
+ifdef CONFIG_NET_DSA_REALTEK_MDIO
+realtek_dsa-objs += realtek-mdio.o
+endif
+
+ifdef CONFIG_NET_DSA_REALTEK_SMI
+realtek_dsa-objs += realtek-smi.o
+endif
+
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000000..a5e7dff96e91
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static const struct realtek_interface_info realtek_mdio_info = {
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+};
+
+/**
+ * realtek_mdio_probe() - Probe an MDIO device for an MDIO-connected switch
+ * @mdiodev: mdio_device to probe on.
+ *
+ * This function should be used as the .probe in an mdio_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to MDIO-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct realtek_priv *priv;
+ int ret;
+
+ priv = rtl83xx_probe(dev, &realtek_mdio_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->bus = mdiodev->bus;
+ priv->mdio_addr = mdiodev->addr;
+ priv->write_reg_noack = realtek_mdio_write;
+
+ ret = rtl83xx_register_switch(priv);
+ if (ret) {
+ rtl83xx_remove(priv);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, "REALTEK_DSA");
+
+/**
+ * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device to be removed.
+ *
+ * This function should be used as the .remove in an mdio_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ rtl83xx_unregister_switch(priv);
+
+ rtl83xx_remove(priv);
+}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, "REALTEK_DSA");
+
+/**
+ * realtek_mdio_shutdown() - Shutdown the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device shutting down.
+ *
+ * This function should be used as the .shutdown in an mdio_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ rtl83xx_shutdown(priv);
+}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, "REALTEK_DSA");
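
The kernel-doc above says these exports are meant to be wired into an mdio_driver by a subdriver. A hedged sketch of that wiring (driver and table names here are made up for illustration; the real subdrivers define their own match tables and also register an SMI platform_driver):

	#include <linux/mdio.h>
	#include <linux/module.h>
	#include <linux/of.h>

	#include "realtek.h"
	#include "realtek-mdio.h"

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, example_of_match);

	static struct mdio_driver example_mdio_driver = {
		.mdiodrv.driver = {
			.name = "example-realtek-mdio",
			.of_match_table = example_of_match,
		},
		.probe = realtek_mdio_probe,
		.remove = realtek_mdio_remove,
		.shutdown = realtek_mdio_shutdown,
	};

	/* Registered via the wrapper so it compiles away to a stub when
	 * CONFIG_NET_DSA_REALTEK_MDIO is disabled.
	 */
	static int __init example_init(void)
	{
		return realtek_mdio_driver_register(&example_mdio_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		realtek_mdio_driver_unregister(&example_mdio_driver);
	}
	module_exit(example_exit);
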
diff --git a/drivers/net/dsa/realtek/realtek-mdio.h b/drivers/net/dsa/realtek/realtek-mdio.h
new file mode 100644
index 000000000000..ee70f6a5b8ff
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_MDIO_H
+#define _REALTEK_MDIO_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO)
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return mdio_driver_register(drv);
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+ mdio_driver_unregister(drv);
+}
+
+int realtek_mdio_probe(struct mdio_device *mdiodev);
+void realtek_mdio_remove(struct mdio_device *mdiodev);
+void realtek_mdio_shutdown(struct mdio_device *mdiodev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+}
+
+static inline int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+}
+
+static inline void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+#endif /* _REALTEK_MDIO_H */
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000000..972e22218418
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+#include "realtek-smi.h"
+#include "rtl83xx.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->variant->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add a click */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is one single case when we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static const struct realtek_interface_info realtek_smi_info = {
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+};
+
+/**
+ * realtek_smi_probe() - Probe a platform device for an SMI-connected switch
+ * @pdev: platform_device to probe on.
+ *
+ * This function should be used as the .probe in a platform_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to SMI-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_smi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ int ret;
+
+ priv = rtl83xx_probe(dev, &realtek_smi_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc)) {
+ rtl83xx_remove(priv);
+ return PTR_ERR(priv->mdc);
+ }
+
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio)) {
+ rtl83xx_remove(priv);
+ return PTR_ERR(priv->mdio);
+ }
+
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ ret = rtl83xx_register_switch(priv);
+ if (ret) {
+ rtl83xx_remove(priv);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, "REALTEK_DSA");
+
+/**
+ * realtek_smi_remove() - Remove the driver of an SMI-connected switch
+ * @pdev: platform_device to be removed.
+ *
+ * This function should be used as the .remove in a platform_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ rtl83xx_unregister_switch(priv);
+
+ rtl83xx_remove(priv);
+}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, "REALTEK_DSA");
+
+/**
+ * realtek_smi_shutdown() - Shutdown the driver of an SMI-connected switch
+ * @pdev: platform_device shutting down.
+ *
+ * This function should be used as the .shutdown in a platform_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ rtl83xx_shutdown(priv);
+}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, "REALTEK_DSA");
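
Symmetrically to the MDIO variant, the kernel-doc above expects these entry points to be plugged into a platform_driver by a subdriver. A hedged sketch (names are illustrative; using .remove with a void-returning callback assumes a kernel where platform_driver has the modern remove signature):

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	#include "realtek.h"
	#include "realtek-smi.h"

	static const struct of_device_id example_smi_of_match[] = {
		{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, example_smi_of_match);

	static struct platform_driver example_smi_driver = {
		.driver = {
			.name = "example-realtek-smi",
			.of_match_table = example_smi_of_match,
		},
		.probe = realtek_smi_probe,
		.remove = realtek_smi_remove,
		.shutdown = realtek_smi_shutdown,
	};

	/* A subdriver would pass this to realtek_smi_driver_register() from
	 * its module init, alongside its MDIO counterpart.
	 */
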
diff --git a/drivers/net/dsa/realtek/realtek-smi.h b/drivers/net/dsa/realtek/realtek-smi.h
new file mode 100644
index 000000000000..ea49a2edd3c8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI)
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+ platform_driver_unregister(drv);
+}
+
+int realtek_smi_probe(struct platform_device *pdev);
+void realtek_smi_remove(struct platform_device *pdev);
+void realtek_smi_shutdown(struct platform_device *pdev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+}
+
+static inline int realtek_smi_probe(struct platform_device *pdev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_smi_remove(struct platform_device *pdev)
+{
+}
+
+static inline void realtek_smi_shutdown(struct platform_device *pdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek.h
index 5bfa53e2480a..c03485a80d93 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -5,18 +5,20 @@
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#include <linux/reset.h>
-struct realtek_smi_ops;
-struct dentry;
-struct inode;
-struct file;
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct phylink_mac_ops;
+struct realtek_ops;
struct rtl8366_mib_counter {
unsigned int base;
@@ -25,7 +27,7 @@ struct rtl8366_mib_counter {
const char *name;
};
-/**
+/*
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc {
@@ -43,19 +45,23 @@ struct rtl8366_vlan_4k {
u8 fid;
};
-struct realtek_smi {
+struct realtek_priv {
struct device *dev;
+ struct reset_control *reset_ctl;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
struct regmap *map;
- struct mii_bus *slave_mii_bus;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
+ struct mii_bus *user_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
+
+ const struct realtek_variant *variant;
- unsigned int clk_delay;
- u8 cmd_read;
- u8 cmd_write;
spinlock_t lock; /* Locks around command writes */
- struct dsa_switch *ds;
+ struct dsa_switch ds;
struct irq_domain *irqdomain;
bool leds_disabled;
@@ -65,7 +71,8 @@ struct realtek_smi {
unsigned int num_mib_counters;
struct rtl8366_mib_counter *mib_counters;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
int vlan4k_enabled;
@@ -74,61 +81,56 @@ struct realtek_smi {
void *chip_data; /* Per-chip extra variant data */
};
-/**
- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+/*
+ * struct realtek_ops - vtable for the per-chiptype operations
* @detect: detects the chiptype
*/
-struct realtek_smi_ops {
- int (*detect)(struct realtek_smi *smi);
- int (*reset_chip)(struct realtek_smi *smi);
- int (*setup)(struct realtek_smi *smi);
- void (*cleanup)(struct realtek_smi *smi);
- int (*get_mib_counter)(struct realtek_smi *smi,
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue);
- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc);
- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc);
- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k);
- int (*set_vlan_4k)(struct realtek_smi *smi,
+ int (*set_vlan_4k)(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k);
- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
u16 val);
};
-struct realtek_smi_variant {
+struct realtek_variant {
const struct dsa_switch_ops *ds_ops;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
size_t chip_data_sz;
};
-/* SMI core calls */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data);
-int realtek_smi_setup_mdio(struct realtek_smi *smi);
-
/* RTL8366 library helpers */
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
-int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -139,7 +141,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-extern const struct realtek_smi_variant rtl8366rb_variant;
-extern const struct realtek_smi_variant rtl8365mb_variant;
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index baaae97283c5..c575e164368c 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -98,18 +98,20 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
-#include "realtek-smi-core.h"
-
-/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Family-specific data and limits */
-#define RTL8365MB_NUM_PHYREGS 32
-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS 11
+#define RTL8365MB_MAX_NUM_EXTINTS 3
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -176,7 +178,7 @@
#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
#define RTL8365MB_PHY_BASE 0x2000
#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
@@ -190,7 +192,7 @@
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -206,39 +208,44 @@
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* EXT port interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
- ((_extport) >> 1) * (0x13C3 - 0x1305))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
- (0xF << (((_extport) % 2)))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
- (((_extport) % 2) * 4)
-
-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
- (RTL8365MB_EXT_RGMXF_REG1 + \
- (((_extport) >> 1) * (0x13C5 - 0x1307)))
+/* External interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT0,EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) <= 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2) * 4))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
+
+/* External interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
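An aside to make the new ternary selectors concrete: for EXT1 (the CPU-facing
interface on the RTL8365MB-VC) the macros above resolve as follows. This is an
illustrative expansion, not part of the patch:

	/* Illustrative expansion for _extint = 1 (EXT1) */
	u32 reg   = RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(1);         /* 0x1305 */
	u32 off   = RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(1); /* (1 % 2) * 4 = 4 */
	u32 mask  = RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(1);   /* 0xF << 4 = 0xF0 */
	u32 mode  = RTL8365MB_EXT_PORT_MODE_RGMII << off;              /* 0x1 << 4 = 0x10 */
	u32 rgmxf = RTL8365MB_EXT_RGMXF_REG(1);                        /* 0x1307 */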
-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT port force configuration registers 0~2 */
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
- ((_extport) & 0x1) + \
- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+/* External interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
@@ -264,6 +271,7 @@
/* Maximum packet length register */
#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+#define RTL8365MB_CFG0_MAX_LEN_MAX 0x3FFF
/* Port learning limit registers */
#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
@@ -276,7 +284,7 @@
(RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
-/* MSTP port state registers - indexed by MSTI (tree instance) */
+/* MSTP port state registers - indexed by tree instance */
#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
(RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
@@ -462,6 +470,95 @@ static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
{ 0x1D32, 0x0002 },
};
+enum rtl8365mb_phy_interface_mode {
+ RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
+ RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
+ RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
+ RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
+ RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
+ RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
+ RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
+ RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
+};
+
+/**
+ * struct rtl8365mb_extint - external interface info
+ * @port: the port with an external interface
+ * @id: the external interface ID, which is either 0, 1, or 2
+ * @supported_interfaces: a bitmask of supported PHY interface modes
+ *
+ * Represents a mapping: port -> { id, supported_interfaces }. To be embedded
+ * in &struct rtl8365mb_chip_info for every port with an external interface.
+ */
+struct rtl8365mb_extint {
+ int port;
+ int id;
+ unsigned int supported_interfaces;
+};
+
+/**
+ * struct rtl8365mb_chip_info - static chip-specific info
+ * @name: human-readable chip name
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @extints: available external interfaces
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * These data are specific to a given chip in the family of switches supported
+ * by this driver. When adding support for another chip in the family, a new
+ * chip info should be added to the rtl8365mb_chip_infos array.
+ */
+struct rtl8365mb_chip_info {
+ const char *name;
+ u32 chip_id;
+ u32 chip_ver;
+ const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+/* Chip info for each supported switch in the family */
+#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
+static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
+ {
+ .name = "RTL8365MB-VC",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0040,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367S",
+ .chip_id = 0x6367,
+ .chip_ver = 0x00A0,
+ .extints = {
+ { 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367RB-VB",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0020,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+};
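As the struct rtl8365mb_chip_info kerneldoc above says, supporting a further
family member is a matter of appending a table entry. A hypothetical entry
might look like this (the name, version and interface set are invented purely
for illustration):

	{
		.name = "RTL83xx-EXAMPLE",	/* hypothetical device */
		.chip_id = 0x6367,		/* invented ID/version pair */
		.chip_ver = 0x00B0,
		.extints = {
			{ 6, 1, PHY_INTF(RGMII) },
		},
		.jam_table = rtl8365mb_init_jam_8365mb_vc,
		.jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
	},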
+
enum rtl8365mb_stp_state {
RTL8365MB_STP_STATE_DISABLED = 0,
RTL8365MB_STP_STATE_BLOCKING = 1,
@@ -515,7 +612,7 @@ struct rtl8365mb_cpu {
/**
* struct rtl8365mb_port - private per-port data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
@@ -523,7 +620,7 @@ struct rtl8365mb_cpu {
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
@@ -531,45 +628,35 @@ struct rtl8365mb_port {
};
/**
- * struct rtl8365mb - private chip-specific driver data
- * @smi: pointer to parent realtek_smi data
+ * struct rtl8365mb - driver private data
+ * @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
- * @chip_id: chip identifier
- * @chip_ver: chip silicon revision
- * @port_mask: mask of all ports
- * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @chip_info: chip-specific info about the attached switch
* @cpu: CPU tagging and CPU port configuration for this chip
* @mib_lock: prevent concurrent reads of MIB counters
* @ports: per-port data
- * @jam_table: chip-specific initialization jam table
- * @jam_size: size of the chip's jam table
*
* Private data for this driver.
*/
struct rtl8365mb {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
int irq;
- u32 chip_id;
- u32 chip_ver;
- u32 port_mask;
- u32 learn_limit_max;
+ const struct rtl8365mb_chip_info *chip_info;
struct rtl8365mb_cpu cpu;
struct mutex mib_lock;
struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
- const struct rtl8365mb_jam_tbl_entry *jam_table;
- size_t jam_size;
};
-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
- return regmap_read_poll_timeout(smi->map,
+ return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
@@ -578,7 +665,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
@@ -591,164 +678,210 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
- val);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ rtl83xx_lock(priv);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
/* Get PHY register data */
- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
- &val);
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
- return ret;
+ goto out;
*data = val & 0xFFFF;
- return 0;
+out:
+ rtl83xx_unlock(priv);
+
+ return ret;
}
-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ rtl83xx_lock(priv);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Set PHY register data */
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
- data);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
- return ret;
+ goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
+
+out:
+ rtl83xx_unlock(priv);
- return 0;
+ return ret;
}
-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
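The OCP mapping of the conventional PHY registers is easy to check by hand; a
sketch of the arithmetic, with values following from the constants above:

	/* regnum 0 (BMCR) -> 0xA400 + 0 * 2  = 0xA400
	 * regnum 1 (BMSR) -> 0xA400 + 1 * 2  = 0xA402
	 * regnum 31       -> 0xA400 + 31 * 2 = 0xA43E (PHYREGMAX)
	 */
	u32 ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;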
-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
+static const struct rtl8365mb_extint *
+rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
+ const struct rtl8365mb_extint *extint =
+ &mb->chip_info->extints[i];
+
+ if (!extint->supported_interfaces)
+ continue;
+
+ if (extint->port == port)
+ return extint;
+ }
+
+ return NULL;
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
return DSA_TAG_PROTO_RTL8_4;
}
-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
+ struct dsa_switch *ds = &priv->ds;
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_port;
u32 val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
- dp = dsa_to_port(smi->ds, port);
+ dp = dsa_to_port(ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -760,7 +893,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* 0 = no delay, 1 = 2 ns delay
* RX delay:
* 0 = no delay, 7 = maximum delay
- * No units are specified, but there are a total of 8 steps.
+ * Each step is approximately 0.3 ns, so the maximum delay is about
+ * 2.1 ns.
*
* The vendor driver also states that this must be configured *before*
* forcing the external interface into a particular mode, which is done
@@ -771,10 +905,6 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* specified. We ignore the detail of the RGMII interface mode
* (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
* property.
- *
- * For the RX delay, we assume that a register value of 4 corresponds to
- * 2 ns. But this is just an educated guess, so ignore all other values
- * to avoid too much confusion.
*/
if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
val = val / 1000; /* convert to ns */
@@ -782,22 +912,22 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val == 0 || val == 2)
tx_delay = val / 2;
else
- dev_warn(smi->dev,
- "EXT port TX delay must be 0 or 2 ns\n");
+ dev_warn(priv->dev,
+ "RGMII TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
- val = val / 1000; /* convert to ns */
+ val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
- if (val == 0 || val == 2)
- rx_delay = val * 2;
+ if (val <= 7)
+ rx_delay = val;
else
- dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2 ns\n");
+ dev_warn(priv->dev,
+ "RGMII RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -806,36 +936,33 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
return ret;
ret = regmap_update_bits(
- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_port));
+ extint->id));
if (ret)
return ret;
return 0;
}
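A worked example of the step conversion above (illustrative values; the
properties come from the generic ethernet-controller binding):

	/* rx-internal-delay-ps = 2100 -> DIV_ROUND_CLOSEST(2100, 300) = 7 (maximum)
	 * rx-internal-delay-ps = 1000 -> DIV_ROUND_CLOSEST(1000, 300) = 3 (~0.9 ns)
	 * rx-internal-delay-ps = 2400 -> 8, out of range, rejected with a warning
	 */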
-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
u32 r_tx_pause;
u32 r_rx_pause;
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_port;
int val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
if (link) {
/* Force the link up with the desired configuration */
@@ -850,7 +977,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
- dev_err(smi->dev, "unsupported port speed %s\n",
+ dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
@@ -860,7 +987,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
- dev_err(smi->dev, "unsupported duplex %s\n",
+ dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
@@ -882,8 +1009,8 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
- ret = regmap_write(smi->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
val);
if (ret)
return ret;
@@ -891,81 +1018,56 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
return 0;
}
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
- phy_interface_t interface)
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port) &&
- (interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL))
- /* Internal PHY */
- return true;
- else if (dsa_is_cpu_port(ds, port) &&
- phy_interface_mode_is_rgmii(interface))
- /* Extension MAC */
- return true;
-
- return false;
-}
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(ds->priv, port);
-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct realtek_smi *smi = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- linkmode_zero(supported);
+ if (!extint) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
return;
}
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+ /* Populate according to the modes supported by _this driver_,
+ * not necessarily the modes supported by the hardware, some of
+ * which remain unimplemented.
+ */
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
+ phy_interface_set_rgmii(config->supported_interfaces);
}
-static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_smi *smi = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ u8 port = dp->index;
int ret;
- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
@@ -976,24 +1078,26 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
*/
}
-static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
@@ -1001,28 +1105,30 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
}
}
-static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
@@ -1030,10 +1136,39 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
}
+static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port,
+ int new_mtu)
+{
+ struct realtek_priv *priv = ds->priv;
+ int frame_size;
+
+ /* When a new MTU is set, DSA always sets the CPU port's MTU to the
+ * largest MTU of the user ports. Because the switch only has a global
+ * RX length register, handling only the CPU port here is enough.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ frame_size = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ dev_dbg(priv->dev, "changing mtu to %d (frame size: %d)\n",
+ new_mtu, frame_size);
+
+ return regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK,
+ frame_size));
+}
+
+static int rtl8365mb_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return RTL8365MB_CFG0_MAX_LEN_MAX - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
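The MTU arithmetic is worth spelling out once (a sketch using the kernel's
standard header constants):

	/* VLAN_ETH_HLEN = 18 (Ethernet header plus one VLAN tag), ETH_FCS_LEN = 4.
	 * Default MTU:  1500 + 18 + 4 = 1522 bytes written to CFG0_MAX_LEN.
	 * Maximum MTU:  0x3FFF (16383) - 18 - 4 = 16361 bytes.
	 */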
+
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
@@ -1052,36 +1187,34 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "invalid STP state: %u\n", state);
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = smi->chip_data;
-
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
- enable ? mb->learn_limit_max : 0);
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
}
-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
@@ -1093,13 +1226,13 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
* and then poll the control register before reading the value from some
* counter registers.
*/
- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
@@ -1121,7 +1254,7 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
@@ -1137,21 +1270,21 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
@@ -1169,8 +1302,7 @@ static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
-
- strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, mib->name);
}
}
@@ -1185,15 +1317,15 @@ static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
@@ -1221,12 +1353,12 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
@@ -1236,7 +1368,7 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
@@ -1286,20 +1418,20 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
@@ -1318,7 +1450,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
@@ -1333,7 +1465,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
@@ -1383,9 +1515,9 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
- struct realtek_smi *smi = p->smi;
+ struct realtek_priv *priv = p->priv;
- rtl8365mb_stats_update(smi, p->index);
+ rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
@@ -1393,11 +1525,11 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
@@ -1405,9 +1537,10 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1415,10 +1548,10 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
*/
mutex_init(&mb->mib_lock);
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1431,45 +1564,43 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
}
}
-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
- ret = regmap_read(smi->map, reg, val);
+ ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
- return regmap_write(smi->map, reg, *val);
+ return regmap_write(priv->map, reg, *val);
}
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
unsigned long line_changes = 0;
- struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = smi->chip_data;
-
- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
@@ -1480,27 +1611,27 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
- line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ line_changes = linkup_ind | linkdown_ind;
}
if (!line_changes)
goto out_none;
- for_each_set_bit(line, &line_changes, smi->num_ports) {
- int child_irq = irq_find_mapping(smi->irqdomain, line);
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
@@ -1508,7 +1639,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
return IRQ_HANDLED;
out_error:
- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
@@ -1543,27 +1674,27 @@ static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, true);
+ return rtl8365mb_set_irq_enable(priv, true);
}
-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, false);
+ return rtl8365mb_set_irq_enable(priv, false);
}
-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
@@ -1572,9 +1703,9 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
int ret;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
@@ -1582,24 +1713,24 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
- dev_err(smi->dev, "failed to get parent irq: %d\n",
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
- &rtl8365mb_irqdomain_ops, smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to add irq domain\n");
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_create_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
@@ -1609,7 +1740,7 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
}
/* Configure chip interrupt signal polarity */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
@@ -1620,40 +1751,40 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
- dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
- ret = rtl8365mb_irq_disable(smi);
+ ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
- "rtl8365mb", smi);
+ "rtl8365mb", priv);
if (ret) {
- dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
- ret = rtl8365mb_irq_enable(smi);
+ ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
@@ -1662,17 +1793,17 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
return 0;
out_free_irq:
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
@@ -1680,36 +1811,36 @@ out_put_node:
return ret;
}
-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
}
- if (smi->irqdomain) {
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
}
}
-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
@@ -1721,27 +1852,61 @@ static int rtl8365mb_cpu_config(struct realtek_smi *smi)
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
- cpu->trap_port >> 3);
- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ cpu->trap_port >> 3 & 0x1);
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
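To make the reworked trap-port encoding concrete (an illustrative value, not
taken from the patch):

	/* cpu->trap_port = 10 (0b1010):
	 *   TRAP_PORT     field = 10 & 0x7        = 0b010
	 *   TRAP_PORT_EXT field = (10 >> 3) & 0x1 = 0b1
	 * The extra bit is what lets the field address all 11 ports.
	 */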
-static int rtl8365mb_switch_init(struct realtek_smi *smi)
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte format, similar to rtl4a but with
+ * the same 0x04 8-bit version and probably 8-bit port source/dest.
+ * There is no public doc about it. Not supported yet and it will probably
+ * never be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
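In practice this hook is driven from user space: DSA exposes a writable
dsa/tagging attribute on the conduit interface, so with the ports down,
writing "rtl8_4t" to /sys/class/net/eth0/dsa/tagging (the interface name is
only an example) ends up here and moves the CPU tag before the CRC.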
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
+ const struct rtl8365mb_chip_info *ci;
int ret;
int i;
+ ci = mb->chip_info;
+
/* Do any chip-specific init jam before getting to the common stuff */
- if (mb->jam_table) {
- for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(smi->map, mb->jam_table[i].reg,
- mb->jam_table[i].val);
+ if (ci->jam_table) {
+ for (i = 0; i < ci->jam_size; i++) {
+ ret = regmap_write(priv->map, ci->jam_table[i].reg,
+ ci->jam_table[i].val);
if (ret)
return ret;
}
@@ -1749,7 +1914,7 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
@@ -1758,75 +1923,80 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
return 0;
}
-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
- 1));
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
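For the record, the poll parameters line up with the comment above (an
illustrative reading of the regmap_read_poll_timeout() arguments):

	/* sleep_us = 20000 (poll every 20 ms), timeout_us = 1e6 (give up after
	 * 1 s), matching the documented 1 second reset time.
	 */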
static int rtl8365mb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
- ret = rtl8365mb_reset_chip(smi);
+ ret = rtl8365mb_reset_chip(priv);
if (ret) {
- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
- ret = rtl8365mb_switch_init(smi);
+ ret = rtl8365mb_switch_init(priv);
if (ret) {
- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
- ret = rtl8365mb_irq_setup(smi);
+ ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- ret = rtl8365mb_cpu_config(smi);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
- /* Set up per-port private data */
- p->smi = smi;
- p->index = i;
-
/* Forward only to the CPU */
- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
- ret = rtl8365mb_port_set_learning(smi, i, false);
+ ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
@@ -1834,29 +2004,30 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
}
- /* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
- RTL8365MB_CFG0_MAX_LEN_MASK,
- FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ ret = rtl8365mb_port_change_mtu(ds, cpu->trap_port, ETH_DATA_LEN);
if (ret)
goto out_teardown_irq;
- ret = realtek_smi_setup_mdio(smi);
+ ret = rtl83xx_setup_user_mdio(ds);
if (ret) {
- dev_err(smi->dev, "could not set up MDIO bus\n");
+ dev_err(priv->dev, "could not set up MDIO bus\n");
goto out_teardown_irq;
}
/* Start statistics counter polling */
- rtl8365mb_stats_setup(smi);
+ rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_irq_teardown(priv);
out_error:
return ret;
@@ -1864,10 +2035,10 @@ out_error:
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
- rtl8365mb_stats_teardown(smi);
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
@@ -1897,64 +2068,62 @@ static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
return 0;
}
-static int rtl8365mb_detect(struct realtek_smi *smi)
+static int rtl8365mb_detect(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
+ int i;
- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
- dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
- switch (chip_id) {
- case RTL8365MB_CHIP_ID_8365MB_VC:
- dev_info(smi->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
-
- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
- smi->num_ports = smi->cpu_port + 1;
-
- mb->smi = smi;
- mb->chip_id = chip_id;
- mb->chip_ver = chip_ver;
- mb->port_mask = BIT(smi->num_ports) - 1;
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
- mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
- mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
-
- mb->cpu.enable = 1;
- mb->cpu.mask = BIT(smi->cpu_port);
- mb->cpu.trap_port = smi->cpu_port;
- mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
- mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
- mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
- mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
+ const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
- break;
- default:
- dev_err(smi->dev,
- "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
- chip_id, chip_ver);
+ if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
+ mb->chip_info = ci;
+ break;
+ }
+ }
+
+ if (!mb->chip_info) {
+ dev_err(priv->dev,
+ "unrecognized switch (id=0x%04x, ver=0x%04x)", chip_id,
+ chip_ver);
return -ENODEV;
}
+ dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
+
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
+ mb->priv = priv;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
return 0;
}
+static const struct phylink_mac_ops rtl8365mb_phylink_mac_ops = {
+ .mac_config = rtl8365mb_phylink_mac_config,
+ .mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .mac_link_up = rtl8365mb_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
- .phylink_validate = rtl8365mb_phylink_validate,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -1963,20 +2132,80 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_eth_mac_stats = rtl8365mb_get_mac_stats,
.get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
.get_stats64 = rtl8365mb_get_stats64,
+ .port_change_mtu = rtl8365mb_port_change_mtu,
+ .port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
-const struct realtek_smi_variant rtl8365mb_variant = {
+const struct realtek_variant rtl8365mb_variant = {
.ds_ops = &rtl8365mb_switch_ops,
- .ops = &rtl8365mb_smi_ops,
+ .ops = &rtl8365mb_ops,
+ .phylink_mac_ops = &rtl8365mb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+static const struct of_device_id rtl8365mb_of_match[] = {
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8365mb_of_match);
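The variant reaches the shared probe code through the of_device_id .data pointer: realtek_smi_probe() and realtek_mdio_probe() in the common interface modules are expected to fetch it with a match-data lookup (of_device_get_match_data() or an equivalent helper; an assumption here, since that code sits outside this diff).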
+
+static struct platform_driver rtl8365mb_smi_driver = {
+ .driver = {
+ .name = "rtl8365mb-smi",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8365mb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8365mb-mdio",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int __init rtl8365mb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8365mb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8365mb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8365mb_init);
+
+static void __exit rtl8365mb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8365mb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+}
+module_exit(rtl8365mb_exit);
+
+MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/realtek/rtl8366-core.c
index bdb8d8d34880..047feeed96a2 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -11,18 +11,18 @@
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
int index = 0;
- ret = smi->ops->get_mc_index(smi, i, &index);
+ ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
@@ -34,17 +34,17 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, "REALTEK_DSA");
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
- * @smi: the Realtek SMI device instance
+ * @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: pointer to a member config entry, filled in with a valid
* configuration on success
* @return: index of a new member config or negative error number
*/
-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
@@ -52,10 +52,10 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
int i;
/* Try to find an existing member config entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -65,19 +65,19 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
}
/* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -86,30 +86,30 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
- ret = rtl8366_mc_is_used(smi, i, &used);
+ ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
@@ -117,23 +117,23 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
- dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
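The allocation above makes three passes over the member-config (MC) table: reuse an entry already holding this VID, otherwise claim a never-used entry (vid == 0 && member == 0) seeded from the 4K table, otherwise recycle an entry that no port currently references. A condensed sketch of that control flow, where get_mc(), seed_from_4k() and mc_is_referenced() stand in for the ops callbacks and are assumptions of this sketch rather than driver symbols:

	static int obtain_mc_sketch(int vid, int num_vlan_mc)
	{
		struct { int vid, member; } mc;
		int i;

		/* pass 1: an entry already mapping this VID */
		for (i = 0; i < num_vlan_mc; i++)
			if (!get_mc(i, &mc) && mc.vid == vid)
				return i;

		/* pass 2: a never-used entry, seeded from the 4K table */
		for (i = 0; i < num_vlan_mc; i++)
			if (!get_mc(i, &mc) && mc.vid == 0 && mc.member == 0)
				return seed_from_4k(i, vid);

		/* pass 3: recycle an entry no port references */
		for (i = 0; i < num_vlan_mc; i++)
			if (!mc_is_referenced(i))
				return seed_from_4k(i, vid);

		return -ENOSPC;	/* all member configurations in use */
	}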
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
@@ -141,31 +141,31 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
@@ -176,50 +176,50 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
vlanmc.fid = fid;
/* Commit updates to the MC entry */
- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, "REALTEK_DSA");
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
- ret = smi->ops->set_mc_index(smi, port, mc);
+ ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, "REALTEK_DSA");
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
@@ -229,52 +229,52 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
*/
if (enable) {
/* Make sure VLAN is ON */
- ret = smi->ops->enable_vlan(smi, true);
+ ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
- smi->vlan_enabled = true;
+ priv->vlan_enabled = true;
}
- ret = smi->ops->enable_vlan4k(smi, enable);
+ ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
- smi->vlan4k_enabled = enable;
+ priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, "REALTEK_DSA");
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
- ret = smi->ops->enable_vlan(smi, enable);
+ ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
- smi->vlan_enabled = enable;
+ priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
- smi->vlan4k_enabled = false;
- ret = smi->ops->enable_vlan4k(smi, false);
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, "REALTEK_DSA");
-int rtl8366_reset_vlan(struct realtek_smi *smi)
+int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
- rtl8366_enable_vlan(smi, false);
- rtl8366_enable_vlan4k(smi, false);
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
/* Clear the 16 VLAN member configurations */
vlanmc.vid = 0;
@@ -282,15 +282,15 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, "REALTEK_DSA");
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -298,12 +298,12 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
@@ -312,13 +312,13 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
+ ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
@@ -327,38 +327,38 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (untagged)
untag |= BIT(port);
- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
- ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, "REALTEK_DSA");
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret, i;
- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
@@ -376,9 +376,9 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
vlanmc.priority = 0;
vlanmc.fid = 0;
}
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
@@ -389,60 +389,56 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, "REALTEK_DSA");
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
- struct realtek_smi *smi = ds->priv;
- struct rtl8366_mib_counter *mib;
+ struct realtek_priv *priv = ds->priv;
int i;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
- mib = &smi->mib_counters[i];
- strncpy(data + i * ETH_GSTRING_LEN,
- mib->name, ETH_GSTRING_LEN);
- }
+ for (i = 0; i < priv->num_mib_counters; i++)
+ ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, "REALTEK_DSA");
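The strncpy() loop above is replaced by ethtool_puts(), which copies one string into the ethtool string area and advances the cursor by ETH_GSTRING_LEN, removing the hand-rolled index arithmetic. A sketch of its semantics (the real helper lives in the ethtool core and NUL-terminates safely):

	static void ethtool_puts_sketch(u8 **data, const char *str)
	{
		strscpy((char *)*data, str, ETH_GSTRING_LEN);
		*data += ETH_GSTRING_LEN;
	}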
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- return smi->num_mib_counters;
+ return priv->num_mib_counters;
}
-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, "REALTEK_DSA");
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int i;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
+ for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
- mib = &smi->mib_counters[i];
- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
- dev_err(smi->dev, "error reading MIB counter %s\n",
+ dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, "REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
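All four cases return the same FIELD_PREP() value because every LED group keeps an identical per-port field layout; what differs per group is the register selected by RTL8366RB_LED_X_X_CTRL_REG(led_group). Assuming the field occupies the low six bits, one bit per port (an assumption of this illustration, not a quote of the header):

	/* Illustration: with a mask of GENMASK(5, 0), FIELD_PREP() places the
	 * per-port bit into the field, so port 3 yields BIT(3) == 0x0008.
	 */
	u32 example = FIELD_PREP(GENMASK(5, 0), BIT(3));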
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+			port_num, led_group);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+			port_num, led_group);
+ return ret;
+ }
+
+ /* Change the LED group to manual controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+		led->cdev.brightness = rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 03deacd83e61..d96ae72b0a5c 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -15,18 +15,19 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
-#include "realtek-smi-core.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -95,12 +96,6 @@
#define RTL8366RB_PAACR_RX_PAUSE BIT(6)
#define RTL8366RB_PAACR_AN BIT(7)
-#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \
- RTL8366RB_PAACR_FULL_DUPLEX | \
- RTL8366RB_PAACR_LINK_UP | \
- RTL8366RB_PAACR_TX_PAUSE | \
- RTL8366RB_PAACR_RX_PAUSE)
-
/* bits 0..7 = port 0, bits 8..15 = port 1 */
#define RTL8366RB_PSTAT0 0x0014
/* bits 0..7 = port 2, bits 8..15 = port 3 */
@@ -123,10 +118,11 @@
RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
/* CPU port control reg */
-#define RTL8368RB_CPU_CTRL_REG 0x0061
-#define RTL8368RB_CPU_PORTS_MSK 0x00FF
+#define RTL8366RB_CPU_CTRL_REG 0x0061
+#define RTL8366RB_CPU_PORTS_MSK 0x00FF
/* Disables inserting custom tag length/type 0x8899 */
-#define RTL8368RB_CPU_NO_TAG BIT(15)
+#define RTL8366RB_CPU_NO_TAG BIT(15)
+#define RTL8366RB_CPU_TAG_SIZE 4
#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
@@ -176,38 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_OFF 0x0
-#define RTL8366RB_LED_DUP_COL 0x1
-#define RTL8366RB_LED_LINK_ACT 0x2
-#define RTL8366RB_LED_SPD1000 0x3
-#define RTL8366RB_LED_SPD100 0x4
-#define RTL8366RB_LED_SPD10 0x5
-#define RTL8366RB_LED_SPD1000_ACT 0x6
-#define RTL8366RB_LED_SPD100_ACT 0x7
-#define RTL8366RB_LED_SPD10_ACT 0x8
-#define RTL8366RB_LED_SPD100_10_ACT 0x9
-#define RTL8366RB_LED_FIBER 0xa
-#define RTL8366RB_LED_AN_FAULT 0xb
-#define RTL8366RB_LED_LINK_RX 0xc
-#define RTL8366RB_LED_LINK_TX 0xd
-#define RTL8366RB_LED_MASTER 0xe
-#define RTL8366RB_LED_FORCE 0xf
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_1_OFFSET 6
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_3_OFFSET 6
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -243,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -350,16 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -396,7 +349,7 @@ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
@@ -412,12 +365,12 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Write the access counter address first;
* the ASIC will then prepare the 64-bit counter to be retrieved
*/
- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
@@ -430,7 +383,7 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
- ret = regmap_read(smi->map, addr + (i - 1), &val);
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
@@ -455,38 +408,38 @@ static u32 rtl8366rb_get_irqmask(struct irq_data *d)
static void rtl8366rb_mask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
- dev_err(smi->dev, "could not mask IRQ\n");
+ dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
- dev_err(smi->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
@@ -502,7 +455,7 @@ static irqreturn_t rtl8366rb_irq(int irq, void *data)
*/
if (line < 12 && line > 5)
line -= 5;
- child_irq = irq_find_mapping(smi->irqdomain, line);
+ child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
@@ -538,7 +491,7 @@ static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
@@ -547,74 +500,72 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
u32 val;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RTL8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
- dev_err(smi->dev, "failed to get parent IRQ\n");
+ dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
/* Fetch IRQ edge information from the descriptor */
- irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ irq_trig = irq_get_trigger_type(irq);
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
- dev_info(smi->dev, "active high/rising IRQ\n");
+ dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
- dev_info(smi->dev, "active low/falling IRQ\n");
+ dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
- dev_err(smi->dev, "could not configure IRQ polarity\n");
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
- "RTL8366RB", smi);
+ "RTL8366RB", priv);
if (ret) {
- dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to create IRQ domain\n");
+	priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc),
+						   RTL8366RB_NUM_INTERRUPT,
+						   &rtl8366rb_irqdomain_ops,
+						   priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++)
- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
-static int rtl8366rb_set_addr(struct realtek_smi *smi)
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
@@ -622,18 +573,18 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
eth_random_addr(addr);
- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
@@ -765,7 +716,7 @@ static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
- int jam_size, struct realtek_smi *smi,
+ int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
@@ -774,24 +725,24 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
@@ -800,23 +751,63 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
+{
+ int ret;
+ u32 val;
+
+ val = mode << RTL8366RB_LED_CTRL_OFFSET(led_group);
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ RTL8366RB_LED_CTRL_MASK(led_group),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* This code is used also with LEDs disabled */
+static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+{
+ int ret = 0;
+ int i;
+
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+
+ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
+ ret = rb8366rb_set_ledgroup_mode(priv, i,
+ RTL8366RB_LEDGROUP_OFF);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
- u32 val;
int ret;
int i;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
- dev_err(smi->dev, "unable to read chip id\n");
+ dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
@@ -824,18 +815,18 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
case RTL8366RB_CHIP_ID_8366:
break;
default:
- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
- dev_err(smi->dev, "unable to read chip version\n");
+ dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
@@ -872,20 +863,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so they can only send packets to themselves and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
@@ -893,65 +884,69 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
- ret = regmap_write(smi->map, 0x0c, 0x240);
+ ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
- ret = regmap_write(smi->map, 0x0d, 0x240);
+ ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
- ret = rtl8366rb_set_addr(smi);
+ ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
/* Enable CPU port with custom DSA tag 8899.
*
- * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this registers
+ * If you set RTL8366RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_CPU_CTRL_REG,
0xFFFF,
- BIT(smi->cpu_port));
+ BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
- BIT(smi->cpu_port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
0);
if (ret)
return ret;
- /* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ /* Set default maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
return ret;
- for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
- /* layer 2 size, see rtl8366rb_change_mtu() */
- rb->max_mtu[i] = 1532;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
+ if (i == priv->cpu_port)
+			/* CPU port needs to also accept the tag */
+ rb->max_mtu[i] = ETH_DATA_LEN + RTL8366RB_CPU_TAG_SIZE;
+ else
+ rb->max_mtu[i] = ETH_DATA_LEN;
+ }
/* Disable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -962,77 +957,64 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* connected to something exotic such as fiber, then this might
* be worth experimenting with.
*/
- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
- /* Set blinking, TODO: make this configurable */
- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ /* Set blinking, used by all LED groups using HW triggers.
+ * TODO: make this configurable
+ */
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
return ret;
/* Set up LED activity:
- * Each port has 4 LEDs, we configure all ports to the same
- * behaviour (no individual config) but we can set up each
- * LED separately.
+ * Each port has 4 LEDs on fixed groups. Each group shares the same
+	 * hardware trigger across all ports. LEDs can only be individually
+	 * controlled by setting the LED group to fixed mode and using the
+	 * driver to toggle the LEDs on/off.
*/
- if (smi->leds_disabled) {
- /* Turn everything off */
- regmap_update_bits(smi->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(smi->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(smi->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- 0);
- val = RTL8366RB_LED_OFF;
+ if (priv->leds_disabled) {
+ ret = rtl8366rb_setup_all_leds_off(priv);
+ if (ret)
+ return ret;
} else {
- /* TODO: make this configurable per LED */
- val = RTL8366RB_LED_FORCE;
- }
- for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(smi->map,
- RTL8366RB_LED_CTRL_REG,
- 0xf << (i * 4),
- val << (i * 4));
+ ret = rtl8366rb_setup_leds(priv);
if (ret)
return ret;
}
- ret = rtl8366_reset_vlan(smi);
+ ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
- ret = rtl8366rb_setup_cascaded_irq(smi);
+ ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
- ret = realtek_smi_setup_mdio(smi);
+ ret = rtl83xx_setup_user_mdio(ds);
if (ret) {
- dev_info(smi->dev, "could not set up MDIO bus\n");
+ dev_err(priv->dev, "could not set up MDIO bus\n");
return -ENODEV;
}
@@ -1047,148 +1029,169 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_RTL4_A;
}
+static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+ struct realtek_priv *priv = ds->priv;
+
+ if (port == priv->cpu_port) {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ /* REVMII only supports 100M FD */
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ /* RGMII only supports 1G FD */
+ phy_interface_set_rgmii(interfaces);
+
+ config->mac_capabilities = MAC_1000 | MAC_100 |
+ MAC_SYM_PAUSE;
+ } else {
+ /* RSGMII port, but we don't have that, and we don't
+		 * specify it in DT, so phylib uses the default of GMII
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ config->mac_capabilities = MAC_1000 | MAC_100 | MAC_10 |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ }
+}
+
+static void
+rtl8366rb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
static void
-rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
+rtl8366rb_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
+ unsigned int val;
int ret;
- if (port != smi->cpu_port)
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
- /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force CPU port\n");
return;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ val = RTL8366RB_PAACR_SPEED_10M;
+ break;
+ case SPEED_100:
+ val = RTL8366RB_PAACR_SPEED_100M;
+ break;
+ case SPEED_1000:
+ val = RTL8366RB_PAACR_SPEED_1000M;
+ break;
+ default:
+ val = RTL8366RB_PAACR_SPEED_1000M;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ val |= RTL8366RB_PAACR_FULL_DUPLEX;
+
+ if (tx_pause)
+ val |= RTL8366RB_PAACR_TX_PAUSE;
+
+ if (rx_pause)
+ val |= RTL8366RB_PAACR_RX_PAUSE;
+
+ val |= RTL8366RB_PAACR_LINK_UP;
+
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
- RTL8366RB_PAACR_CPU_PORT << 8);
+ val << 8);
if (ret) {
- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
+ dev_dbg(priv->dev, "set PAACR to %04x\n", val);
+
/* Enable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
- dev_err(smi->dev, "failed to enable the CPU port\n");
+ dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
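With this change the CPU port override value is assembled from the negotiated link parameters rather than the fixed RTL8366RB_PAACR_CPU_PORT constant removed earlier in this diff. For a 1 Gbit full-duplex link with pause enabled in both directions, the assembled value reproduces exactly what that constant encoded:

	u32 val = RTL8366RB_PAACR_SPEED_1000M |
		  RTL8366RB_PAACR_FULL_DUPLEX |
		  RTL8366RB_PAACR_TX_PAUSE |
		  RTL8366RB_PAACR_RX_PAUSE |
		  RTL8366RB_PAACR_LINK_UP;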
static void
-rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+rtl8366rb_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to disable the CPU port\n");
- return;
- }
-}
-
-static void rb8366rb_set_port_led(struct realtek_smi *smi,
- int port, bool enable)
-{
- u16 val = enable ? 0x3f : 0;
- int ret;
-
- if (smi->leds_disabled)
- return;
-
- switch (port) {
- case 0:
- ret = regmap_update_bits(smi->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F, val);
- break;
- case 1:
- ret = regmap_update_bits(smi->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F << RTL8366RB_LED_1_OFFSET,
- val << RTL8366RB_LED_1_OFFSET);
- break;
- case 2:
- ret = regmap_update_bits(smi->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F, val);
- break;
- case 3:
- ret = regmap_update_bits(smi->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F << RTL8366RB_LED_3_OFFSET,
- val << RTL8366RB_LED_3_OFFSET);
- break;
- case 4:
- ret = regmap_update_bits(smi->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- enable ? RTL8366RB_P4_RGMII_LED : 0);
- break;
- default:
- dev_err(smi->dev, "no LED for port %d\n", port);
+ dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
- if (ret)
- dev_err(smi->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "enable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
- rb8366rb_set_port_led(smi, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "disable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
-
- rb8366rb_set_port_led(smi, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1198,29 +1201,29 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
- dev_err(smi->dev, "failed to join port %d\n", port);
+ dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1230,31 +1233,33 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
- dev_err(smi->dev, "failed to leave port %d\n", port);
+ dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we can not access, leave ourselves */
- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
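Bridge join and leave thus reduce to bitmap bookkeeping in the RTL8366RB_PORT_ISO() registers: each port's register holds the set of ports it may forward to, and regmap_update_bits() sets or clears only the peer's bit. As an illustration (not a register dump), after port 0 joins a bridge containing ports 1 and 2, its reachable set is those peers plus the CPU port (port 5, enabled at setup):

	unsigned int reachable = BIT(1) | BIT(2) | BIT(5);	/* == 0x26 */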
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
- * @smi: SMI state container
+ * @priv: SMI state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
*/
-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
@@ -1263,17 +1268,17 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
- vlan_filtering ? "enable" : "disable");
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
+ str_enable_disable(vlan_filtering));
/* If the port is not in the member set, the frame will be dropped */
- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
@@ -1283,9 +1288,9 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
- ret = rtl8366rb_drop_untagged(smi, port, false);
+ ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
@@ -1307,11 +1312,11 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
@@ -1324,7 +1329,7 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 val;
int i;
@@ -1343,13 +1348,13 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "unknown bridge state requested\n");
+ dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
@@ -1358,67 +1363,74 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
- rb = smi->chip_data;
+ rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
* common denominator: the biggest set for any one port will
* be the biggest MTU for the switch.
- *
- * The first setting, 1522 bytes, is max IP packet 1500 bytes,
- * plus ethernet header, 1518 bytes, plus CPU tag, 4 bytes.
- * This function should consider the parameter an SDU, so the
- * MTU passed for this setting is 1518 bytes. The same logic
- * of subtracting the DSA tag of 4 bytes apply to the other
- * settings.
*/
- max_mtu = 1518;
+ max_mtu = ETH_DATA_LEN;
for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
if (rb->max_mtu[i] > max_mtu)
max_mtu = rb->max_mtu[i];
}
- if (max_mtu <= 1518)
+
+ /* Translate to layer 2 size.
+	 * Add the ethernet header (with possible VLAN tag) and checksum to the size.
+ * For ETH_DATA_LEN (1500 bytes) this will add up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ if (max_mtu <= 1522)
len = RTL8366RB_SGCR_MAX_LENGTH_1522;
- else if (max_mtu > 1518 && max_mtu <= 1532)
+ else if (max_mtu > 1522 && max_mtu <= 1536)
+ /* This will be the most common default if using VLAN and
+ * CPU tagging on a port as both VLAN and CPU tag will
+ * result in 1518 + 4 + 4 = 1526 bytes.
+ */
len = RTL8366RB_SGCR_MAX_LENGTH_1536;
- else if (max_mtu > 1532 && max_mtu <= 1548)
+ else if (max_mtu > 1536 && max_mtu <= 1552)
len = RTL8366RB_SGCR_MAX_LENGTH_1552;
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
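A worked example of the layer-2 translation done above, using the kernel constants ETH_DATA_LEN = 1500, VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:

	max_mtu = 1500 + 18 + 4;	/* 1522 -> RTL8366RB_SGCR_MAX_LENGTH_1522 */
	/* CPU port caches 1500 + 4 (CPU tag) = 1504, see rtl8366rb_setup() */
	max_mtu = 1504 + 18 + 4;	/* 1526 -> RTL8366RB_SGCR_MAX_LENGTH_1536 */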
static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
{
- /* The max MTU is 16000 bytes, so we subtract the CPU tag
- * and the max presented to the system is 15996 bytes.
+	/* The max frame length is 16000 bytes, so we subtract the VLAN
+	 * ethernet header and the checksum and arrive at
+ * 16000 - 18 - 4 = 15978. This does not include the CPU tag
+ * since that is added to the requested MTU by the DSA framework.
*/
- return 15996;
+ return 16000 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1431,19 +1443,19 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return -EINVAL;
/* write VID */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
@@ -1459,7 +1471,7 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return 0;
}
-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1479,7 +1491,7 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
@@ -1487,13 +1499,13 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
}
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1506,7 +1518,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return -EINVAL;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
@@ -1524,7 +1536,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1548,7 +1560,7 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
@@ -1558,15 +1570,15 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
@@ -1577,22 +1589,23 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
return 0;
}
-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
+ struct dsa_switch *ds = &priv->ds;
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
pvid_enabled = !!index;
- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
- RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
@@ -1603,17 +1616,17 @@ static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
- if (smi->vlan4k_enabled)
+ if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
@@ -1622,23 +1635,23 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
return true;
}
-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map,
+ dev_dbg(priv->dev, "%s VLAN\n", str_enable_disable(enable));
+ return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ dev_dbg(priv->dev, "%s VLAN 4k\n", str_enable_disable(enable));
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
@@ -1647,32 +1660,40 @@ static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ rtl83xx_lock(priv);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(smi->map, reg, 0);
+ ret = regmap_write(priv->map_nolock, reg, 0);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
- return ret;
+ goto out;
}
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
+ &val);
if (ret)
- return ret;
+ goto out;
+
+ ret = val;
- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
- return val;
+out:
+ rtl83xx_unlock(priv);
+
+ return ret;
}
-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
@@ -1681,34 +1702,39 @@ static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ rtl83xx_lock(priv);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(smi->map, reg, val);
+ ret = regmap_write(priv->map_nolock, reg, val);
if (ret)
- return ret;
+ goto out;
- return 0;
+out:
+ rtl83xx_unlock(priv);
+
+ return ret;
}
-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
- RTL8366RB_CHIP_CTRL_RESET_HW);
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
@@ -1717,21 +1743,21 @@ static int rtl8366rb_reset_chip(struct realtek_smi *smi)
} while (--timeout);
if (!timeout) {
- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
-static int rtl8366rb_detect(struct realtek_smi *smi)
+static int rtl8366rb_detect(struct realtek_priv *priv)
{
- struct device *dev = smi->dev;
+ struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
- ret = regmap_read(smi->map, 0x5c, &val);
+ ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
@@ -1744,11 +1770,11 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
- smi->num_ports = RTL8366RB_NUM_PORTS;
- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
- smi->mib_counters = rtl8366rb_mib_counters;
- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
@@ -1756,18 +1782,23 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
break;
}
- ret = rtl8366rb_reset_chip(smi);
+ ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
+static const struct phylink_mac_ops rtl8366rb_phylink_mac_ops = {
+ .mac_config = rtl8366rb_mac_config,
+ .mac_link_down = rtl8366rb_mac_link_down,
+ .mac_link_up = rtl8366rb_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .phylink_get_caps = rtl8366rb_phylink_get_caps,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1784,9 +1815,11 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
};
-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
@@ -1802,12 +1835,68 @@ static const struct realtek_smi_ops rtl8366rb_smi_ops = {
.phy_write = rtl8366rb_phy_write,
};
-const struct realtek_smi_variant rtl8366rb_variant = {
+const struct realtek_variant rtl8366rb_variant = {
.ds_ops = &rtl8366rb_switch_ops,
- .ops = &rtl8366rb_smi_ops,
+ .ops = &rtl8366rb_ops,
+ .phylink_mac_ops = &rtl8366rb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+static const struct of_device_id rtl8366rb_of_match[] = {
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8366rb_of_match);
+
+static struct platform_driver rtl8366rb_smi_driver = {
+ .driver = {
+ .name = "rtl8366rb-smi",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8366rb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8366rb-mdio",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8366rb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8366rb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8366rb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8366rb_init);
+
+static void __exit rtl8366rb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8366rb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+}
+module_exit(rtl8366rb_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("REALTEK_DSA");
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, they are ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
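/* For example, LED group 2 is forced through RTL8366RB_LED_2_3_CTRL_REG
 * (0x0433) under RTL8366RB_LED_2_X_CTRL_MASK, i.e. bits 5:0, while
 * group 3 uses bits 11:6 of the same register.
 */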
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: whether PVID is enabled for the respective port
+ * @leds: per-port and per-LED-group LED info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
new file mode 100644
index 000000000000..2b9bd4462714
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+
+#include "realtek.h"
+#include "rtl83xx.h"
+
+/**
+ * rtl83xx_lock() - Locks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function is passed to regmap to be used as the lock function.
+ * It is also used externally to block regmap before executing multiple
+ * operations that must happen in sequence (which will use
+ * realtek_priv.map_nolock instead).
+ *
+ * Context: Can sleep. Acquires and holds priv->map_lock.
+ * Return: nothing
+ */
+void rtl83xx_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, "REALTEK_DSA");
+
+/**
+ * rtl83xx_unlock() - Unlocks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function unlocks the lock acquired by rtl83xx_lock.
+ *
+ * Context: Releases priv->map_lock.
+ * Return: nothing
+ */
+void rtl83xx_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, "REALTEK_DSA");
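/* An illustrative sketch (not from this patch) of the pattern the
 * kernel-doc above describes: take the regmap lock once, then issue a
 * sequence of dependent accesses through priv->map_nolock, as the
 * rtl8366rb PHY accessors earlier in this patch do.
 */
static int rtl83xx_locked_sequence_example(struct realtek_priv *priv,
					   u32 ctrl_reg, u32 cmd,
					   u32 data_reg, u32 *val)
{
	int ret;

	rtl83xx_lock(priv);

	/* Both accesses happen under one critical section */
	ret = regmap_write(priv->map_nolock, ctrl_reg, cmd);
	if (ret)
		goto out;

	ret = regmap_read(priv->map_nolock, data_reg, val);
out:
	rtl83xx_unlock(priv);

	return ret;
}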
+
+static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int rtl83xx_user_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+/**
+ * rtl83xx_setup_user_mdio() - register the user mii bus driver
+ * @ds: DSA switch associated with this user_mii_bus
+ *
+ * Registers the MDIO bus for built-in Ethernet PHYs, and associates it with
+ * the mandatory 'mdio' child OF node of the switch.
+ *
+ * Context: Can sleep.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio_np = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ bus->priv = priv;
+ bus->name = "Realtek user MII";
+ bus->read = rtl83xx_user_mdio_read;
+ bus->write = rtl83xx_user_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s:user_mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ ret = devm_of_mdiobus_register(priv->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ goto err_put_node;
+ }
+
+ priv->user_mii_bus = bus;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, "REALTEK_DSA");
+
+/**
+ * rtl83xx_probe() - probe a Realtek switch
+ * @dev: the device being probed
+ * @interface_info: specific management interface info.
+ *
+ * This function initializes realtek_priv and reads data from the device tree
+ * node. The switch is hard reset if a reset method is provided.
+ *
+ * Context: Can sleep.
+ * Return: Pointer to the realtek_priv or ERR_PTR() in case of failure.
+ *
+ * The realtek_priv pointer does not need to be freed as it is controlled by
+ * devres.
+ */
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info)
+{
+ const struct realtek_variant *var;
+ struct realtek_priv *priv;
+ struct regmap_config rc = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = interface_info->reg_read,
+ .reg_write = interface_info->reg_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = rtl83xx_lock,
+ .unlock = rtl83xx_unlock,
+ };
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return ERR_PTR(-EINVAL);
+
+ priv = devm_kzalloc(dev, size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->map_lock);
+
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ rc.disable_locking = true;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->variant = var;
+ priv->ops = var->ops;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ spin_lock_init(&priv->lock);
+
+ priv->leds_disabled = of_property_read_bool(dev->of_node,
+ "realtek,disable-leds");
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(priv->reset_ctl))
+ return dev_err_cast_probe(dev, priv->reset_ctl,
+ "failed to get reset control\n");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return ERR_CAST(priv->reset);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ if (priv->reset_ctl || priv->reset) {
+ rtl83xx_reset_assert(priv);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ rtl83xx_reset_deassert(priv);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ return priv;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, "REALTEK_DSA");
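/* A hedged sketch (names hypothetical, not from this patch) of how a
 * management-interface backend such as realtek-smi or realtek-mdio is
 * expected to use rtl83xx_probe() and rtl83xx_register_switch(): it
 * supplies its bus-specific register accessors via
 * struct realtek_interface_info.
 */
static int example_reg_read(void *ctx, u32 reg, u32 *val)
{
	/* bus-specific read of @reg into @val; ctx is the realtek_priv */
	return 0;
}

static int example_reg_write(void *ctx, u32 reg, u32 val)
{
	/* bus-specific write of @val to @reg */
	return 0;
}

static const struct realtek_interface_info example_interface_info = {
	.reg_read = example_reg_read,
	.reg_write = example_reg_write,
};

static int example_bus_probe(struct device *dev)
{
	struct realtek_priv *priv;

	priv = rtl83xx_probe(dev, &example_interface_info);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	return rtl83xx_register_switch(priv);
}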
+
+/**
+ * rtl83xx_register_switch() - detect and register a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function first checks the switch chip ID and then registers a
+ * DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_register_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ int ret;
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to detect switch\n");
+ return ret;
+ }
+
+ ds->priv = priv;
+ ds->dev = priv->dev;
+ ds->ops = priv->variant->ds_ops;
+ ds->phylink_mac_ops = priv->variant->phylink_mac_ops;
+ ds->num_ports = priv->num_ports;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to register switch\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, "REALTEK_DSA");
+
+/**
+ * rtl83xx_unregister_switch() - unregister a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function unregisters a DSA switch.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_unregister_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_unregister_switch(ds);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, "REALTEK_DSA");
+
+/**
+ * rtl83xx_shutdown() - shutdown a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function shuts down the DSA switch and clears the platform driver data,
+ * to prevent realtek_{smi,mdio}_remove() from running afterwards, which is
+ * possible if the parent bus implements its own .shutdown() as .remove().
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_shutdown(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(priv->dev, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, "REALTEK_DSA");
+
+/**
+ * rtl83xx_remove() - Cleanup a realtek switch driver
+ * @priv: realtek_priv pointer
+ *
+ * Placeholder for common cleanup procedures.
+ *
+ * Context: Any
+ * Return: nothing
+ */
+void rtl83xx_remove(struct realtek_priv *priv)
+{
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, "REALTEK_DSA");
+
+void rtl83xx_reset_assert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to assert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, true);
+}
+
+void rtl83xx_reset_deassert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_deassert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to deassert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, false);
+}
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Realtek DSA switches common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl83xx.h b/drivers/net/dsa/realtek/rtl83xx.h
new file mode 100644
index 000000000000..c8a0ff8fd75e
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL83XX_H
+#define _RTL83XX_H
+
+struct realtek_interface_info {
+ int (*reg_read)(void *ctx, u32 reg, u32 *val);
+ int (*reg_write)(void *ctx, u32 reg, u32 val);
+};
+
+void rtl83xx_lock(void *ctx);
+void rtl83xx_unlock(void *ctx);
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds);
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info);
+int rtl83xx_register_switch(struct realtek_priv *priv);
+void rtl83xx_unregister_switch(struct realtek_priv *priv);
+void rtl83xx_shutdown(struct realtek_priv *priv);
+void rtl83xx_remove(struct realtek_priv *priv);
+void rtl83xx_reset_assert(struct realtek_priv *priv);
+void rtl83xx_reset_deassert(struct realtek_priv *priv);
+
+#endif /* _RTL83XX_H */
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000000..4d857e3be10b
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
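/* For instance, STAT_DESC(ifInErrors) expands to
 *   { .offset = A5PSW_ifInErrors, .name = "ifInErrors" },
 * so each ethtool string mirrors its hardware register name exactly.
 */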
+
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+ /* Enable "management forward" pattern matching. This forwards
+ * packets from this port only towards the management port and thus
+ * isolates the port.
+ */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 mask = A5PSW_PORT_ENA_TX(port);
+ u32 reg = enable ? mask : 0;
+
+ /* Even though the port TX is disabled through TXENA bit in the
+ * PORT_ENA register, it can still send BPDUs. This depends on the tag
+ * configuration added when sending packets from the CPU port to the
+ * switch port. Indeed, when using forced forwarding without filtering,
+ * even disabled ports will be able to send packets that are tagged.
+ * This makes it possible to implement STP support when ports are in a
+ * state where forwarding traffic should be stopped but BPDUs should
+ * still be sent.
+ */
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000 Mbps full-duplex mode only (cf. the ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+
+ if (dsa_port_is_cpu(dp))
+ return NULL;
+
+ return a5psw->pcs[dp->index];
+}
+
+static void a5psw_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void a5psw_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+ int port = dp->index;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg);
+}
+
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
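/* Worked example (clock rate hypothetical): the register holds the age
 * period in units of 1024 * A5PSW_TABLE_ENTRIES core clock cycles. With
 * clk at 125 MHz, tmp = 125000000 / 1000 = 125000 cycles per ms, so the
 * 300000 ms bridge default becomes
 *   agetime = 300000 * 125000 / (1024 * A5PSW_TABLE_ENTRIES)
 * while the "max" check above runs the same conversion in reverse from
 * the largest value A5PSW_LK_AGETIME_MASK the field can hold.
 */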
+
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+ u32 reg = !learn ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+ u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+ u32 reg = block ? mask : 0;
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ static const u8 offsets[] = {
+ A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK, A5PSW_MCAST_DEF_MASK
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_rmw(a5psw, offsets[i], BIT(port),
+ set ? BIT(port) : 0);
+}
+
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+ bool standalone)
+{
+ a5psw_port_learning_set(a5psw, port, !standalone);
+ a5psw_flooding_set_resolution(a5psw, port, !standalone);
+ a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_port_set_standalone(a5psw, port, false);
+
+ a5psw->bridged_ports |= BIT(port);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw->bridged_ports &= ~BIT(port);
+
+ a5psw_port_set_standalone(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+a5psw_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 val;
+
+ /* If a port is set as standalone, we do not want to be able to
+ * configure flooding or learning, which would result in joining the
+ * unique bridge. This can happen when a port leaves the bridge, in
+ * which case the DSA core will try to "clear" all flags for the
+ * standalone port (i.e. enable flooding, disable learning). In that
+ * case do not fail, but do not apply the flags either.
+ */
+ if (!(a5psw->bridged_ports & BIT(port)))
+ return 0;
+
+ if (flags.mask & BR_LEARNING) {
+ val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port);
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN,
+ A5PSW_INPUT_LEARN_DIS(port), val);
+ }
+
+ if (flags.mask & BR_FLOOD) {
+ val = flags.val & BR_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val);
+ }
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val);
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ bool learning_enabled, rx_enabled, tx_enabled;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = false;
+ break;
+ case BR_STATE_LEARNING:
+ rx_enabled = false;
+ tx_enabled = false;
+ learning_enabled = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ rx_enabled = true;
+ tx_enabled = true;
+ learning_enabled = dp->learning;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ a5psw_port_learning_set(a5psw, port, learning_enabled);
+ a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+ a5psw_port_tx_enable(a5psw, port, tx_enabled);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask set to 0x1f when entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+ /* Our hardware does not associate any VID with the FDB entries, so if
+ * two entries were added for the same MAC but with different VIDs,
+ * deleting the first one would also delete the second one. Since there
+ * is unfortunately nothing we can do about that, do not return an
+ * error...
+ */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) |
+ BIT(port + A5PSW_VLAN_DISC_SHIFT);
+ u32 val = vlan_filtering ? mask : 0;
+ struct a5psw *a5psw = ds->priv;
+
+ /* Disable/enable vlan tagging */
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port),
+ vlan_filtering ? BIT(port) : 0);
+
+ /* Disable/enable vlan input filtering */
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val);
+
+ return 0;
+}
+
+static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid)
+{
+ u32 vlan_res;
+ int i;
+
+ /* Find vlan for this port */
+ for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
+ vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
+ if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid)
+ return i;
+ }
+
+ return -1;
+}
+
+static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid)
+{
+ u32 vlan_res;
+ int i;
+
+ /* Find a free VLAN entry */
+ for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
+ vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
+ if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) {
+ vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid);
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res);
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw,
+ unsigned int vlan_res_id, int port,
+ bool set)
+{
+ u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK |
+ BIT(port);
+ u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id);
+ u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg;
+
+ if (set)
+ val |= BIT(port);
+
+ /* Toggle tag mask read */
+ a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
+ reg = a5psw_reg_readl(a5psw, vlan_res_off);
+ a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
+
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, vlan_res_off, reg);
+}
+
+static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id,
+ int port, bool set)
+{
+ u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port);
+ u32 reg = A5PSW_VLAN_RES_WR_PORTMASK;
+
+ if (set)
+ reg |= BIT(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg);
+}
+
+static int a5psw_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct a5psw *a5psw = ds->priv;
+ u16 vid = vlan->vid;
+ int vlan_res_id;
+
+ vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
+ if (vlan_res_id < 0) {
+ vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid);
+ if (vlan_res_id < 0)
+ return -ENOSPC;
+ }
+
+ a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true);
+ if (tagged)
+ a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true);
+
+ /* Configure the port to tag with the corresponding VID, but do not
+ * enable it yet: wait for vlan filtering to be enabled before enabling
+ * vlan port tagging.
+ */
+ if (pvid)
+ a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid);
+
+ return 0;
+}
+
+static int a5psw_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct a5psw *a5psw = ds->priv;
+ u16 vid = vlan->vid;
+ int vlan_res_id;
+
+ vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
+ if (vlan_res_id < 0)
+ return -EINVAL;
+
+ a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false);
+ a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false);
+
+ return 0;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ ethtool_puts(&data, a5psw_stats[u].name);
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static void a5psw_vlan_setup(struct a5psw *a5psw, int port)
+{
+ u32 reg;
+
+ /* Enable TAG always mode for the port; this is actually controlled
+ * by the VLAN_IN_MODE_ENA field, which will be used for PVID insertion.
+ */
+ reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS;
+ reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port);
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port),
+ reg);
+
+ /* Set transparent mode for output frame manipulation; this will
+ * depend on the VLAN_RES configuration mode.
+ */
+ reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT;
+ reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port);
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE,
+ A5PSW_VLAN_OUT_MODE_PORT(port), reg);
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding and learning for CPU port */
+ if (dsa_port_is_cpu(dp)) {
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_learning_set(a5psw, port, true);
+ }
+
+ /* Enable standalone mode for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_set_standalone(a5psw, port, true);
+
+ a5psw_vlan_setup(a5psw, port);
+ }
+
+ return 0;
+}
+
+static const struct phylink_mac_ops a5psw_phylink_mac_ops = {
+ .mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .mac_config = a5psw_phylink_mac_config,
+ .mac_link_down = a5psw_phylink_mac_link_down,
+ .mac_link_up = a5psw_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_pre_bridge_flags = a5psw_port_pre_bridge_flags,
+ .port_bridge_flags = a5psw_port_bridge_flags,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_vlan_filtering = a5psw_port_vlan_filtering,
+ .port_vlan_add = a5psw_port_vlan_add,
+ .port_vlan_del = a5psw_port_vlan_del,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
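/* Editor's illustration, not part of the patch: the divider arithmetic
 * above, assuming a 125 MHz hclk (the actual rate is board-dependent)
 * and the 2.5 MHz default MDIO frequency. CLKDIV is GENMASK(15, 7),
 * i.e. 9 bits wide, so the valid range is A5PSW_MDIO_CLK_DIV_MIN (5)
 * up to FIELD_MAX = 511.
 */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 125000000UL;	/* assumed hclk rate */
	unsigned long freq = 2500000UL;		/* A5PSW_MDIO_DEF_FREQ */
	unsigned long div = (rate / freq) / 2;	/* 25 */

	printf("div=%lu ok=%d\n", div, div >= 5 && div <= 511);
	return 0;
}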
+
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ a5psw->bridged_ports = BIT(A5PSW_CPU_PORT);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get_enabled(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get_enabled(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+ if (mdio) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ of_node_put(mdio);
+ if (ret) {
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto free_pcs;
+ }
+ }
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->phylink_mac_ops = &a5psw_phylink_mac_ops;
+ ds->priv = a5psw;
+
+ platform_set_drvdata(pdev, a5psw);
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto free_pcs;
+ }
+
+ return 0;
+
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static void a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = a5psw_of_mtable,
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000000..d54acedac194
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_TX(port) BIT(port)
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_ENABLE BIT(6)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + 4 * (port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_VLANID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
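/* Editor's illustration, not part of the patch: packing one VLAN
 * resolution entry by hand with the masks above - VLANID in bits 16:5,
 * PORTMASK in bits 4:0, WR_PORTMASK (bit 30) selecting a port-mask
 * write. The example values (VID 100, ports 0, 1 and 4) are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vid = 100, portmask = 0x13;	/* BIT(0)|BIT(1)|BIT(4) */
	uint32_t val = (1u << 30) | (vid << 5) | portmask;

	printf("val = 0x%08x\n", val);		/* prints val = 0x40000c93 */
	return 0;
}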
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* Register is named TRAININIT in datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
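/* Editor's illustration, not part of the patch: a quick arithmetic
 * check of the defines above. A tag with priority 5 and VID 100 packs
 * as (5 << 12) | 100, and the maximum MTU leaves room for the 8-byte
 * switch tag plus two 4-byte VLAN headers below the 10 KiB jumbo limit.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tag = (5u << 12) | 100;	  /* A5PSW_VLAN_TAG(5, 100) */
	unsigned int mtu = 10 * 1024 - (8 + 8);	  /* A5PSW_MAX_MTU */

	printf("tag=0x%04x mtu=%u\n", tag, mtu);  /* 0x5064, 10224 */
	return 0;
}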
+
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
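/* Editor's illustration, not part of the patch: a userspace mimic of
 * union lk_data showing how the 8-byte FDB entry splits across the two
 * 32-bit LK_DATA words - the first six bytes are the MAC address, so
 * the valid/static/prio/port_mask bitfield lands in the top half of
 * the high word. Bitfield ordering is compiler-defined; this assumes
 * the usual little-endian layout the driver relies on.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fdb_entry_mimic {
	uint8_t mac[6];
	uint16_t valid:1, is_static:1, prio:3, port_mask:5, reserved:6;
} __attribute__((packed));

union lk_data_mimic {
	struct { uint32_t lo, hi; };
	struct fdb_entry_mimic entry;
};

int main(void)
{
	union lk_data_mimic d = { 0 };
	static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	memcpy(d.entry.mac, mac, sizeof(mac));
	d.entry.valid = 1;
	d.entry.port_mask = 1 << 4;	/* entry reachable via port 4 */

	printf("lo=0x%08x hi=0x%08x\n", d.lo, d.hi);
	return 0;
}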
+
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated to the switch
+ * @mii_bus: MDIO bus struct
+ * @pcs: Array of PCS connected to the switch ports (not for the CPU)
+ * @ds: DSA switch struct
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 21dba16af097..dceb96ae9c83 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
@@ -149,8 +151,10 @@ struct sja1105_info {
bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
int (*clocking_setup)(struct sja1105_private *priv);
- int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
- int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*pcs_mdio_read_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg);
+ int (*pcs_mdio_write_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val);
int (*disable_microcontroller)(struct sja1105_private *priv);
const char *name;
bool supports_mii[SJA1105_MAX_NUM_PORTS];
@@ -249,6 +253,8 @@ struct sja1105_private {
bool fixed_link[SJA1105_MAX_NUM_PORTS];
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
+ unsigned long hwts_tx_en;
+ unsigned long hwts_rx_en;
const struct sja1105_info *info;
size_t max_xfer_len;
struct spi_device *spidev;
@@ -256,11 +262,15 @@ struct sja1105_private {
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
struct sja1105_flow_block flow_block;
- struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
+ /* PTP two-step TX timestamp ID, and its serialization lock */
+ spinlock_t ts_id_lock;
+ u8 ts_id;
/* Serializes access to the dynamic config interface */
struct mutex dynamic_config_lock;
struct devlink_region **regions;
@@ -268,8 +278,7 @@ struct sja1105_private {
struct mii_bus *mdio_base_t1;
struct mii_bus *mdio_base_tx;
struct mii_bus *mdio_pcs;
- struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
- struct sja1105_tagger_data tagger_data;
+ struct phylink_pcs *pcs[SJA1105_MAX_NUM_PORTS];
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
@@ -285,7 +294,6 @@ struct sja1105_spi_message {
/* From sja1105_main.c */
enum sja1105_reset_reason {
SJA1105_VLAN_FILTERING = 0,
- SJA1105_RX_HWTSTAMPING,
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
SJA1105_BEST_EFFORT_POLICING,
@@ -301,10 +309,12 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_mdio.c */
int sja1105_mdiobus_register(struct dsa_switch *ds);
void sja1105_mdiobus_unregister(struct dsa_switch *ds);
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index e3699f76f6d7..08a3e7b96254 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -153,14 +153,14 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_tx_clk;
- const int mac_clk_sources[] = {
+ static const int mac_clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
CLKSRC_MII3_TX_CLK,
CLKSRC_MII4_TX_CLK,
};
- const int phy_clk_sources[] = {
+ static const int phy_clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -194,7 +194,7 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_MII0_RX_CLK,
CLKSRC_MII1_RX_CLK,
CLKSRC_MII2_RX_CLK,
@@ -221,7 +221,7 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -248,7 +248,7 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_IDIV0,
CLKSRC_IDIV1,
CLKSRC_IDIV2,
@@ -349,8 +349,13 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
clksrc = CLKSRC_PLL0;
} else {
- int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
- CLKSRC_IDIV3, CLKSRC_IDIV4};
+ static const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
clksrc = clk_sources[port];
}
@@ -638,7 +643,7 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl ref_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
- const int clk_sources[] = {
+ static const int clk_sources[] = {
CLKSRC_MII0_TX_CLK,
CLKSRC_MII1_TX_CLK,
CLKSRC_MII2_TX_CLK,
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..30b1f1ba762f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,8 +93,10 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
return PTR_ERR(region);
}
@@ -120,16 +122,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = devlink_info_driver_name_put(req, "sja1105");
- if (rc)
- return rc;
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
- priv->info->name);
- return rc;
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
}
int sja1105_devlink_setup(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 7729d3f8b7f5..984c0e604e8d 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
int rc;
- /* We don't _need_ to read the full entry, just the command area which
- * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
- * buffer that contains the full entry too. Additionally, our API
- * doesn't really know how many bytes into the buffer does the command
- * area really begin. So just read back the whole entry.
- */
+ /* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
- memset(cmd, 0, sizeof(*cmd));
- ops->cmd_packing(packed_buf, cmd, UNPACK);
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
- return cmd->valid ? -EAGAIN : 0;
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
- int rc;
-
- return read_poll_timeout(sja1105_dynamic_config_poll_valid,
- rc, rc != -EAGAIN,
- SJA1105_DYNAMIC_CONFIG_SLEEP_US,
- SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
- false, priv, cmd, ops);
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
}
/* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- /* Don't dereference possibly NULL pointer - maybe caller
- * only wanted to see whether the entry existed or not.
- */
- if (entry)
- ops->entry_packing(packed_buf, entry, UNPACK);
- return 0;
+ return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
- if (cmd.errors)
- return -EINVAL;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- return 0;
+ return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index decc6c931dc1..84d7d3f66bd0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
@@ -586,7 +589,6 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
enum sja1105_counter_index max_ctr, i;
- char *p = data;
if (stringset != ETH_SS_STATS)
return;
@@ -598,8 +600,10 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
- strscpy(p, sja1105_port_counters[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
+ ethtool_puts(&data, sja1105_port_counters[i].name);
}
}
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 72b9b39b0989..05d8ed3121e7 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -205,15 +205,18 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
u16 pcp = U16_MAX;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -300,6 +303,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
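/* Editor's illustration, not part of the patch: the same accept/reject
 * rules in self-contained form. The struct below is a hypothetical
 * stand-in for the police fields of struct flow_action_entry; an
 * offloadable tc policer needs exceed=drop, conform=pipe (or ok, only
 * as the last action), a byte-based rate, and no peakrate, avrate or
 * overhead.
 */
#include <stdbool.h>

enum act { ACT_DROP, ACT_PIPE, ACT_ACCEPT, ACT_OTHER };

struct police_mimic {
	enum act exceed, notexceed;
	bool is_last;
	unsigned long long peakrate_bytes_ps, avrate, overhead, rate_pkt_ps;
};

static bool police_offloadable(const struct police_mimic *p)
{
	if (p->exceed != ACT_DROP)
		return false;
	if (p->notexceed != ACT_PIPE && p->notexceed != ACT_ACCEPT)
		return false;
	if (p->notexceed == ACT_ACCEPT && !p->is_last)
		return false;
	return !p->peakrate_bytes_ps && !p->avrate && !p->overhead &&
	       !p->rate_pkt_ps;
}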
+
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
@@ -321,12 +364,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- rc = -EOPNOTSUPP;
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
goto out;
- }
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
@@ -379,7 +419,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
vl_rule = true;
rc = sja1105_vl_gate(priv, port, extack, cookie,
- &key, act->gate.index,
+ &key, act->hw_index,
act->gate.prio,
act->gate.basetime,
act->gate.cycletime,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c343effe2e96..aa2145cf29a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -15,13 +15,13 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/of_device.h>
-#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
+#include <linux/units.h>
+
#include "sja1105.h"
#include "sja1105_tas.h"
@@ -118,13 +118,14 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct sja1105_private *priv = ds->priv;
struct sja1105_vlan_lookup_entry *vlan;
bool drop_untagged = false;
int match, rc;
u16 pvid;
- if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
+ if (br && br_vlan_enabled(br))
pvid = priv->bridge_pvid[port];
else
pvid = priv->tag_8021q_pvid[port];
@@ -392,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
- /* This selects between Independent VLAN Learning (IVL) and
- * Shared VLAN Learning (SVL)
- */
- .shared_learn = true,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -867,12 +866,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
.hostprio = 7,
.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
- .incl_srcpt1 = false,
- .send_meta1 = false,
+ .incl_srcpt1 = true,
+ .send_meta1 = true,
.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
- .incl_srcpt0 = false,
- .send_meta0 = false,
+ .incl_srcpt0 = true,
+ .send_meta0 = true,
/* Default to an invalid value */
.mirr_port = priv->ds->num_ports,
/* No TTEthernet */
@@ -1039,7 +1038,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
policing[bcast].sharindx = port;
/* Only SJA1110 has multicast policers */
- if (mcast <= table->ops->max_entry_count)
+ if (mcast < table->ops->max_entry_count)
policing[mcast].sharindx = port;
}
@@ -1188,9 +1187,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1200,7 +1198,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1210,7 +1207,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
- of_node_put(child);
return -ENODEV;
}
@@ -1219,7 +1215,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
- of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1233,10 +1228,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -1263,29 +1256,11 @@ static int sja1105_parse_dt(struct sja1105_private *priv)
return rc;
}
-/* Convert link speed from SJA1105 to ethtool encoding */
-static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
- u64 speed)
-{
- if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
- return SPEED_10;
- if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
- return SPEED_100;
- if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
- return SPEED_1000;
- if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
- return SPEED_2500;
- return SPEED_UNKNOWN;
-}
-
-/* Set link speed in the MAC configuration for a specific port. */
-static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
- int speed_mbps)
+static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
+ int speed_mbps)
{
struct sja1105_mac_config_entry *mac;
- struct device *dev = priv->ds->dev;
u64 speed;
- int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
@@ -1319,7 +1294,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
break;
default:
- dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ dev_err(priv->ds->dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
@@ -1327,15 +1302,28 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
- * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
- * we need to configure the PCS only (if even that).
*/
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
- mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
- else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
- mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
- else
- mac[port].speed = speed;
+ mac[port].speed = speed;
+
+ return 0;
+}
+
+/* Write the MAC Configuration Table entry and, if necessary, the CGU settings,
+ * after a link speed change for this port.
+ */
+static int sja1105_set_port_config(struct sja1105_private *priv, int port)
+{
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
@@ -1357,102 +1345,88 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-/* The SJA1105 MAC programming model is through the static config (the xMII
- * Mode table cannot be dynamically reconfigured), and we have to program
- * that early (earlier than PHYLINK calls us, anyway).
- * So just error out in case the connected PHY attempts to change the initial
- * system interface MII protocol from what is defined in the DT, at least for
- * now.
- */
-static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- return priv->phy_mode[port] != interface;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+
+ return priv->pcs[dp->index];
}
-static void sja1105_mac_config(struct dsa_switch *ds, int port,
+static void sja1105_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs;
-
- if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
- phy_modes(state->interface));
- return;
- }
-
- xpcs = priv->xpcs[port];
-
- if (xpcs)
- phylink_set_pcs(dp->pl, &xpcs->pcs);
}
-static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), true);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+
+ sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true);
}
-static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct sja1105_private *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ int port = dp->index;
- sja1105_adjust_port_config(priv, port, speed);
+ if (!sja1105_set_port_speed(priv, port, speed))
+ sja1105_set_port_config(priv, port);
sja1105_inhibit_tx(priv, BIT(port), false);
}
-static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- /* Construct a new mask which exhaustively contains all link features
- * supported by the MAC, and then apply that (logical AND) to what will
- * be sent to the PHY for "marketing".
- */
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- linkmode_zero(supported);
- return;
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
- phylink_set(mask, Autoneg);
- phylink_set(mask, MII);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT1_Full);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
- phylink_set(mask, 1000baseT_Full);
- if (priv->info->supports_2500basex[port]) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ config->mac_capabilities |= MAC_1000FD;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
}
static int
@@ -1818,25 +1792,71 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
+ return rc;
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1865,15 +1885,16 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
- /* We need to hide the FDB entry for unknown multicast */
- if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
- l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
- continue;
-
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dp))
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1884,9 +1905,19 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
int i;
+ mutex_lock(&priv->fdb_lock);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1900,7 +1931,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
- return;
+ break;
}
if (!(l2_lookup.destports & BIT(port)))
@@ -1912,26 +1943,30 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
- return;
+ break;
}
}
+
+ mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
@@ -1979,7 +2014,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv)
}
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
- struct net_device *br, bool member)
+ struct dsa_bridge bridge, bool member)
{
struct sja1105_l2_forwarding_entry *l2_fwd;
struct sja1105_private *priv = ds->priv;
@@ -2004,7 +2039,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
*/
if (i == port)
continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
sja1105_port_allow_traffic(l2_fwd, i, port, member);
sja1105_port_allow_traffic(l2_fwd, port, i, member);
@@ -2039,6 +2074,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
 * of the INGRESS flag". So BPDUs are still allowed to pass.
@@ -2048,11 +2084,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
@@ -2073,23 +2104,63 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
}
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- return sja1105_bridge_member(ds, port, br, true);
+ int rc;
+
+ rc = sja1105_bridge_member(ds, port, bridge, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
+ extack);
+ if (rc) {
+ sja1105_bridge_member(ds, port, bridge, false);
+ return rc;
+ }
+
+ return 0;
}
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
- sja1105_bridge_member(ds, port, br, false);
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
+ sja1105_bridge_member(ds, port, bridge, false);
}
-#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
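/* Editor's illustration, not part of the patch: the fixed SJA1110
 * mapping above, assuming SJA1105_NUM_TC is 8. Port 0 (the internal
 * microcontroller port) owns no shapers, so {port 2, prio 3} maps to
 * index (2 - 1) * 8 + 3 = 11.
 */
#include <stdio.h>

static int fixed_cbs_index(int port, int prio)
{
	return (port - 1) * 8 + prio;	/* SJA1110_FIXED_CBS(port, prio) */
}

int main(void)
{
	printf("%d\n", fixed_cbs_index(2, 3));	/* prints 11 */
	return 0;
}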
static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
@@ -2120,14 +2191,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
int index;
if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);
- index = sja1105_find_unused_cbs_shaper(priv);
- if (index < 0)
- return -ENOSPC;
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
cbs = &priv->cbs[index];
cbs->port = port;
@@ -2137,9 +2214,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
- /* User space is in kbits/sec, hardware in bytes/sec */
- cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
- cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+ /* User space is in kbits/sec, while the hardware is in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
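/* Editor's illustration, not part of the patch: worked numbers for the
 * slope conversion above, using arbitrary example values. idleslope
 * 20000 kbit/s and sendslope -80000 kbit/s imply a port rate of
 * 100000 kbit/s, so with BYTES_PER_KBIT = 1000 / 8 = 125 the hardware
 * gets idle_slope = 20000 * 125 / 100000 = 25 and send_slope = 100.
 */
#include <stdio.h>
#include <stdlib.h>

#define BYTES_PER_KBIT (1000LL / 8)	/* as provided by <linux/units.h> */

int main(void)
{
	long long idleslope = 20000, sendslope = -80000;	/* kbit/s */
	long long rate_kbps = idleslope - sendslope;		/* 100000 */

	printf("idle=%lld send=%lld\n",
	       idleslope * BYTES_PER_KBIT / rate_kbps,		/* 25 */
	       llabs(sendslope * BYTES_PER_KBIT) / rate_kbps);	/* 100 */
	return 0;
}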
@@ -2178,7 +2263,6 @@ static int sja1105_reload_cbs(struct sja1105_private *priv)
static const char * const sja1105_reset_reasons[] = {
[SJA1105_VLAN_FILTERING] = "VLAN filtering",
- [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
@@ -2196,8 +2280,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
- int speed_mbps[SJA1105_MAX_NUM_PORTS];
u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
+ u64 mac_speed[SJA1105_MAX_NUM_PORTS];
struct sja1105_mac_config_entry *mac;
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
@@ -2205,24 +2289,23 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;
+ mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- /* Back up the dynamic link speed changed by sja1105_adjust_port_config
+ /* Back up the dynamic link speed changed by sja1105_set_port_speed()
* in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
- u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
-
- speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
- mac[i].speed);
+ mac_speed[i] = mac[i].speed;
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
- if (priv->xpcs[i])
- bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ if (priv->pcs[i])
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
}
/* No PTP operations can run right now */
@@ -2277,28 +2360,28 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
}
for (i = 0; i < ds->num_ports; i++) {
- struct dw_xpcs *xpcs = priv->xpcs[i];
- unsigned int mode;
+ struct phylink_pcs *pcs = priv->pcs[i];
+ unsigned int neg_mode;
- rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
+ mac[i].speed = mac_speed[i];
+ rc = sja1105_set_port_config(priv, i);
if (rc < 0)
goto out;
- if (!xpcs)
+ if (!pcs)
continue;
if (bmcr[i] & BMCR_ANENABLE)
- mode = MLO_AN_INBAND;
- else if (priv->fixed_link[i])
- mode = MLO_AN_FIXED;
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
- mode = MLO_AN_PHY;
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i],
+ NULL, true);
if (rc < 0)
goto out;
- if (!phylink_autoneg_inband(mode)) {
+ if (neg_mode == PHYLINK_PCS_NEG_OUTBAND) {
int speed = SPEED_UNKNOWN;
if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
@@ -2310,8 +2393,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
speed = SPEED_10;
- xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
- speed, DUPLEX_FULL);
+ pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
}
}
@@ -2320,6 +2403,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
return rc;
}
@@ -2340,7 +2424,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
- struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -2372,33 +2455,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->tpid = tpid;
/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
general_params->tpid2 = tpid2;
- /* When VLAN filtering is on, we need to at least be able to
- * decode management traffic through the "backup plan".
- */
- general_params->incl_srcpt1 = enabled;
- general_params->incl_srcpt0 = enabled;
-
- /* VLAN filtering => independent VLAN learning.
- * No VLAN filtering (or best effort) => shared VLAN learning.
- *
- * In shared VLAN learning mode, untagged traffic still gets
- * pvid-tagged, and the FDB table gets populated with entries
- * containing the "real" (pvid or from VLAN tag) VLAN ID.
- * However the switch performs a masked L2 lookup in the FDB,
- * effectively only looking up a frame's DMAC (and not VID) for the
- * forwarding decision.
- *
- * This is extremely convenient for us, because in modes with
- * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
- * each front panel port. This is good for identification but breaks
- * learning badly - the VID of the learnt FDB entry is unique, aka
- * no frames coming from any other port are going to have it. So
- * for forwarding purposes, this is as though learning was broken
- * (all frames get flooded).
- */
- table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
- l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -2508,7 +2564,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
- "Range 1024-3071 reserved for dsa_8021q operation");
+ "Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
@@ -2587,8 +2643,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
if (netif_is_bridge_master(upper)) {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev && dp->bridge_dev != upper &&
- br_vlan_enabled(dp->bridge_dev)) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br != upper && br_vlan_enabled(br)) {
NL_SET_ERR_MSG_MOD(extack,
"Only one VLAN-aware bridge is supported");
return -EBUSY;
@@ -2599,18 +2656,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_port_disable(struct dsa_switch *ds, int port)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!dsa_is_user_port(ds, port))
- return;
-
- kthread_cancel_work_sync(&sp->xmit_work);
- skb_queue_purge(&sp->xmit_queue);
-}
-
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -2636,7 +2681,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
- dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user);
/* Wait until the switch has processed the frame */
do {
@@ -2669,10 +2714,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
-#define work_to_port(work) \
- container_of((work), struct sja1105_port, xmit_work)
-#define tagger_to_sja1105(t) \
- container_of((t), struct sja1105_private, tagger_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct sja1105_deferred_xmit_work, work)
/* Deferred work is unfortunately necessary because setting up the management
* route cannot be done from atomic context (SPI transfer takes a sleepable
* lock on the bus)
@@ -2680,25 +2723,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
*/
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_port *sp = work_to_port(work);
- struct sja1105_tagger_data *tagger_data = sp->data;
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- int port = sp - priv->ports;
- struct sk_buff *skb;
+ struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sja1105_private *priv = ds->priv;
+ int port = xmit_work->dp->index;
+
+ clone = SJA1105_SKB_CB(skb)->clone;
- while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
- struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ mutex_lock(&priv->mgmt_lock);
- mutex_lock(&priv->mgmt_lock);
+ sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
- sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- if (clone)
- sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+ mutex_unlock(&priv->mgmt_lock);
- mutex_unlock(&priv->mgmt_lock);
- }
+ kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data;
+
+ if (proto != priv->info->tag_proto)
+ return -EPROTONOSUPPORT;
+
+ tagger_data = sja1105_tagger_data(ds);
+ tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+ tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+ return 0;
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -2830,7 +2889,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
@@ -2914,7 +2973,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
- int match;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@@ -2927,7 +2988,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
- return -ENOSPC;
+ rc = -ENOSPC;
+ goto out;
}
if (flags.val & BR_MCAST_FLOOD)
@@ -2935,10 +2997,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup[match].index,
- &l2_lookup[match],
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3001,58 +3066,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_teardown_ports(struct sja1105_private *priv)
-{
- struct dsa_switch *ds = priv->ds;
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
-
- if (sp->xmit_worker)
- kthread_destroy_worker(sp->xmit_worker);
- }
-}
-
-static int sja1105_setup_ports(struct sja1105_private *priv)
-{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
- struct dsa_switch *ds = priv->ds;
- int port, rc;
-
- /* Connections between dsa_port and sja1105_port */
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct kthread_worker *worker;
- struct net_device *slave;
-
- if (!dsa_port_is_user(dp))
- continue;
-
- dp->priv = sp;
- sp->data = tagger_data;
- slave = dp->slave;
- kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
- worker = kthread_create_worker(0, "%s_xmit", slave->name);
- if (IS_ERR(worker)) {
- rc = PTR_ERR(worker);
- dev_err(ds->dev,
- "failed to create deferred xmit thread: %d\n",
- rc);
- goto out_destroy_workers;
- }
- sp->xmit_worker = worker;
- skb_queue_head_init(&sp->xmit_queue);
- }
-
- return 0;
-
-out_destroy_workers:
- sja1105_teardown_ports(priv);
- return rc;
-}
-
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -3061,7 +3074,7 @@ out_destroy_workers:
* ref_clk pin. So port clocking needs to be initialized early, before
* connecting to PHYs is attempted, otherwise they won't respond through MDIO.
* Setting correct PHY link speed does not matter now.
- * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * But dsa_user_phy_setup is called later than sja1105_setup, so the PHY
* bindings are not yet parsed by DSA core. We need to parse early so that we
* can populate the xMII mode parameters table.
*/
@@ -3098,10 +3111,6 @@ static int sja1105_setup(struct dsa_switch *ds)
}
}
- rc = sja1105_setup_ports(priv);
- if (rc)
- goto out_static_config_free;
-
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -3137,9 +3146,8 @@ static int sja1105_setup(struct dsa_switch *ds)
* TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
*/
ds->vlan_filtering_is_global = true;
- ds->untag_bridge_pvid = true;
- /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->num_fwd_offloading_bridges = 7;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3158,7 +3166,6 @@ out_ptp_clock_unregister:
out_flower_teardown:
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
out_static_config_free:
sja1105_static_config_free(&priv->static_config);
@@ -3178,26 +3185,29 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_ptp_clock_unregister(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
sja1105_static_config_free(&priv->static_config);
}
+static const struct phylink_mac_ops sja1105_phylink_mac_ops = {
+ .mac_select_pcs = sja1105_mac_select_pcs,
+ .mac_config = sja1105_mac_config,
+ .mac_link_up = sja1105_mac_link_up,
+ .mac_link_down = sja1105_mac_link_down,
+};
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
+ .connect_tag_protocol = sja1105_connect_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
- .phylink_validate = sja1105_phylink_validate,
- .phylink_mac_config = sja1105_mac_config,
- .phylink_mac_link_up = sja1105_mac_link_up,
- .phylink_mac_link_down = sja1105_mac_link_down,
+ .phylink_get_caps = sja1105_phylink_get_caps,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
- .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -3228,8 +3238,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
.tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
.port_prechangeupper = sja1105_prechangeupper,
- .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3361,12 +3369,15 @@ static int sja1105_probe(struct spi_device *spi)
ds->dev = dev;
ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
+ ds->phylink_mac_ops = &sja1105_phylink_mac_ops;
ds->priv = priv;
priv->ds = ds;
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
+ spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
if (rc < 0) {
@@ -3385,18 +3396,14 @@ static int sja1105_probe(struct spi_device *spi)
return dsa_register_switch(priv->ds);
}
-static int sja1105_remove(struct spi_device *spi)
+static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void sja1105_shutdown(struct spi_device *spi)
@@ -3426,12 +3433,27 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
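The switchover above moves deferred transmission from a per-port skb queue to one heap-allocated work item per frame, with connect_tag_protocol() handing the tagger its xmit_work_fn callback. A minimal sketch of the tagger side, assuming struct sja1105_deferred_xmit_work and sja1105_tagger_data() match include/linux/dsa/sja1105.h; the helper name defer_xmit and the worker ownership are illustrative:

#include <linux/dsa/sja1105.h>
#include <linux/kthread.h>
#include <linux/slab.h>

/* Sketch only: schedule one frame for deferred transmission. The work
 * handler (sja1105_port_deferred_xmit above) sends the skb over the
 * management route and then kfree()s this work item.
 */
static struct sk_buff *defer_xmit(struct dsa_port *dp, struct sk_buff *skb,
                                  struct kthread_worker *xmit_worker)
{
        struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
        struct sja1105_deferred_xmit_work *xmit_work;

        xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
        if (!xmit_work)
                return NULL;    /* no memory: drop the frame */

        kthread_init_work(&xmit_work->work, tagger_data->xmit_work_fn);
        xmit_work->dp = dp;
        xmit_work->skb = skb;

        kthread_queue_work(xmit_worker, &xmit_work->work);

        return NULL;    /* consumed; transmitted asynchronously */
}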
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 215dd17ca790..8d535c033cef 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -7,20 +7,15 @@
#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return 0xffff;
@@ -37,19 +32,15 @@ int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
-
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
tmp = val;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
@@ -58,7 +49,7 @@ int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -66,17 +57,12 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1110_XPCS_ID >> 16;
@@ -108,7 +94,8 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -116,17 +103,12 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
@@ -167,7 +149,7 @@ static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
}
-static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -175,30 +157,31 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- tmp = reg & MII_REGADDR_C45_MASK;
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ return tmp & 0xffff;
+}
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return tmp & 0xffff;
- }
+	tmp = reg;
+
+	rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 read */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
@@ -207,41 +190,37 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- int rc;
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
-
- tmp = reg & MII_REGADDR_C45_MASK;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ tmp = val & 0xffff;
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
- tmp = val & 0xffff;
+static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return 0;
- }
+	tmp = reg;
+
+	rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 write */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
tmp = val & 0xffff;
@@ -354,8 +333,10 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
bus->name = "SJA1110 100base-T1 MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
dev_name(priv->ds->dev));
- bus->read = sja1105_base_t1_mdio_read;
- bus->write = sja1105_base_t1_mdio_write;
+ bus->read = sja1105_base_t1_mdio_read_c22;
+ bus->write = sja1105_base_t1_mdio_write_c22;
+ bus->read_c45 = sja1105_base_t1_mdio_read_c45;
+ bus->write_c45 = sja1105_base_t1_mdio_write_c45;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
@@ -392,7 +373,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
int rc = 0;
int port;
- if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45)
return 0;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
@@ -402,8 +383,8 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
bus->name = "SJA1105 PCS MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
dev_name(ds->dev));
- bus->read = priv->info->pcs_mdio_read;
- bus->write = priv->info->pcs_mdio_write;
+ bus->read_c45 = priv->info->pcs_mdio_read_c45;
+ bus->write_c45 = priv->info->pcs_mdio_write_c45;
bus->parent = ds->dev;
/* There is no PHY on this MDIO bus => mask out all PHY addresses
* from auto probing.
@@ -419,8 +400,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
}
for (port = 0; port < ds->num_ports; port++) {
- struct mdio_device *mdiodev;
- struct dw_xpcs *xpcs;
+ struct phylink_pcs *pcs;
if (dsa_is_unused_port(ds, port))
continue;
@@ -429,19 +409,13 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
continue;
- mdiodev = mdio_device_create(bus, port);
- if (IS_ERR(mdiodev)) {
- rc = PTR_ERR(mdiodev);
+ pcs = xpcs_create_pcs_mdiodev(bus, port);
+ if (IS_ERR(pcs)) {
+ rc = PTR_ERR(pcs);
goto out_pcs_free;
}
- xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
- if (IS_ERR(xpcs)) {
- rc = PTR_ERR(xpcs);
- goto out_pcs_free;
- }
-
- priv->xpcs[port] = xpcs;
+ priv->pcs[port] = pcs;
}
priv->mdio_pcs = bus;
@@ -450,12 +424,10 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
out_pcs_free:
for (port = 0; port < ds->num_ports; port++) {
- if (!priv->xpcs[port])
- continue;
-
- mdio_device_free(priv->xpcs[port]->mdiodev);
- xpcs_destroy(priv->xpcs[port]);
- priv->xpcs[port] = NULL;
+ if (priv->pcs[port]) {
+ xpcs_destroy_pcs(priv->pcs[port]);
+ priv->pcs[port] = NULL;
+ }
}
mdiobus_unregister(bus);
@@ -473,12 +445,10 @@ static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
return;
for (port = 0; port < ds->num_ports; port++) {
- if (!priv->xpcs[port])
- continue;
-
- mdio_device_free(priv->xpcs[port]->mdiodev);
- xpcs_destroy(priv->xpcs[port]);
- priv->xpcs[port] = NULL;
+ if (priv->pcs[port]) {
+ xpcs_destroy_pcs(priv->pcs[port]);
+ priv->pcs[port] = NULL;
+ }
}
mdiobus_unregister(priv->mdio_pcs);
@@ -498,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
if (rc)
return rc;
- mdio_node = of_get_child_by_name(switch_node, "mdios");
+ mdio_node = of_get_available_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
- if (!of_device_is_available(mdio_node))
- goto out_put_mdio_node;
-
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
@@ -517,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
goto err_free_base_tx_mdiobus;
}
-out_put_mdio_node:
of_node_put(mdio_node);
return 0;
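With dedicated C45 ops, the mmd/reg pair arrives pre-parsed instead of being unpacked from a MII_ADDR_C45-encoded register value, so the SPI address becomes a plain composition. A worked example of that encoding (illustrative helper name; constants from <linux/mdio.h>):

#include <linux/mdio.h>
#include <linux/types.h>

/* E.g. mmd = MDIO_MMD_VEND2 (31) and reg = 0x0002 yield 0x001f0002,
 * the same "addr = (mmd << 16) | reg" computed by the handlers above.
 */
static u64 pcs_spi_addr(int mmd, int reg)
{
        return ((u64)mmd << 16) | reg;
}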
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 54396992a919..fefe46e2a5e6 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,103 +58,64 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
- */
-static int sja1105_change_rxtstamping(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
- struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- struct sja1105_general_params_entry *general_params;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
- general_params = table->entries;
- general_params->send_meta1 = on;
- general_params->send_meta0 = on;
-
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
- }
- ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
- skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
-
- return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
-}
-
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- bool rx_on;
- int rc;
+ unsigned long hwts_tx_en, hwts_rx_en;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en;
+ hwts_rx_en = priv->hwts_rx_en;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
+ hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
+ hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- rx_on = false;
+ hwts_rx_en &= ~BIT(port);
break;
- default:
- rx_on = true;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ hwts_rx_en |= BIT(port);
break;
+ default:
+ return -ERANGE;
}
- if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
- clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ priv->hwts_tx_en = hwts_tx_en;
+ priv->hwts_rx_en = hwts_rx_en;
- rc = sja1105_change_rxtstamping(priv, rx_on);
- if (rc < 0) {
- dev_err(ds->dev,
- "Failed to change RX timestamping: %d\n", rc);
- return rc;
- }
- if (rx_on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
- }
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
return 0;
}
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->flags = 0;
+ if (priv->hwts_tx_en & BIT(port))
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
- if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ config->tx_type = HWTSTAMP_TX_OFF;
+ if (priv->hwts_rx_en & BIT(port))
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
@@ -403,7 +364,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (ptp_data->extts_enabled)
@@ -420,7 +381,7 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ if (!(priv->hwts_rx_en & BIT(port)))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
@@ -453,6 +414,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return priv->info->rxtstamp(ds, port, skb);
}
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
@@ -461,22 +455,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
u8 ts_id;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->ts_id_lock);
- ts_id = sp->data->ts_id;
+ ts_id = priv->ts_id;
/* Deal automatically with 8-bit wraparound */
- sp->data->ts_id++;
+ priv->ts_id++;
SJA1105_SKB_CB(clone)->ts_id = ts_id;
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->ts_id_lock);
- skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -486,10 +480,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
struct sk_buff *clone;
- if (!sp->hwts_tx_en)
+ if (!(priv->hwts_tx_en & BIT(port)))
return;
clone = skb_clone_sk(skb);
@@ -738,10 +731,6 @@ static int sja1105_per_out_enable(struct sja1105_private *priv,
if (perout->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (perout->flags)
- return -EOPNOTSUPP;
-
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
@@ -821,13 +810,6 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (extts->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
@@ -843,7 +825,7 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (on)
sja1105_ptp_extts_setup_timer(&priv->ptp_data);
else
- del_timer_sync(&priv->ptp_data.extts_timer);
+ timer_delete_sync(&priv->ptp_data.extts_timer);
return 0;
}
@@ -896,7 +878,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = {
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
ptp_data->caps = (struct ptp_clock_info) {
@@ -914,13 +895,15 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
};
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
- skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
- spin_lock_init(&tagger_data->meta_lock);
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -937,15 +920,14 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
- del_timer_sync(&ptp_data->extts_timer);
+ timer_delete_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
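sja1110_txtstamp() above allocates timestamp IDs from an 8-bit counter under the new ts_id_lock, and sja1110_process_meta_tstamp() later matches the meta frame against the queued clone by that ID alone. A minimal sketch of the allocation, assuming priv->ts_id is a u8 as the wraparound comment implies (helper name illustrative):

#include <linux/spinlock.h>

#include "sja1105.h"    /* driver-private header, assumed in scope */

/* At most 256 two-step TX clones can be outstanding at once; u8
 * arithmetic wraps 255 -> 0 automatically.
 */
static u8 sja1105_next_ts_id(struct sja1105_private *priv)
{
        u8 ts_id;

        spin_lock(&priv->ts_id_lock);
        ts_id = priv->ts_id++;
        spin_unlock(&priv->ts_id_lock);

        return ts_id;
}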
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 3ae6b9fdd492..325e3777ea07 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -8,6 +8,21 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
+
/* Calculate the first base_time in the future that satisfies this
* relationship:
*
@@ -62,6 +77,10 @@ struct sja1105_ptp_data {
struct timer_list extts_timer;
/* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
@@ -82,7 +101,7 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
struct sk_buff *clone);
@@ -93,9 +112,12 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *sts);
@@ -112,6 +134,9 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
#else
struct sja1105_ptp_cmd;
@@ -178,6 +203,8 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1110_rxtstamp NULL
#define sja1110_txtstamp NULL
+#define sja1110_process_meta_tstamp NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
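The new tick helpers pin down the SJA1105 timestamp unit: 8 ns per tick, i.e. a 125 MHz counter. A quick illustrative sanity check, not part of the driver:

#include <linux/kernel.h>
#include <linux/time64.h>

static void sja1105_tick_selftest(void)
{
        /* One second is exactly 125,000,000 ticks of 8 ns each, and the
         * two helpers invert each other for any multiple of 8 ns.
         */
        WARN_ON(ns_to_sja1105_ticks(NSEC_PER_SEC) != 125000000);
        WARN_ON(sja1105_ticks_to_ns(125000000) != NSEC_PER_SEC);
}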
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d3c9ad6d39d4..834b5c1b4db0 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -719,8 +719,8 @@ const struct sja1105_info sja1105r_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
@@ -756,8 +756,8 @@ const struct sja1105_info sja1105s_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -794,8 +795,8 @@ const struct sja1105_info sja1110a_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -844,8 +846,8 @@ const struct sja1105_info sja1110b_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -894,8 +897,8 @@ const struct sja1105_info sja1110c_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -944,8 +948,8 @@ const struct sja1105_info sja1110d_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..ffece8a400a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
@@ -1921,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
if (i > table->entry_count)
return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
- (table->entry_count - i) * entry_size);
+ if (i + 1 < table->entry_count) {
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+ }
table->entry_count--;
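The guard added here fixes an out-of-bounds read when the last entry is deleted. Worked through with concrete numbers (illustrative values only):

/* entry_count = 4, entry_size = 8:
 * - deleting i = 3 (the last entry), the old code did
 *   memmove(base + 24, base + 32, (4 - 3) * 8 = 8), reading one full
 *   entry past the end of the allocation; now i + 1 == entry_count,
 *   so nothing is moved and only entry_count-- runs.
 * - deleting i = 1 still compacts the tail:
 *   memmove(base + 8, base + 16, (4 - 1 - 1) * 8 = 16) moves entries
 *   2..3 down by one slot.
 */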
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index e6153848a950..d5949d2c3e71 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -516,10 +516,11 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
/* Can't change an already configured port (must delete qdisc first).
* Can't delete the qdisc from an unconfigured port.
*/
- if (!!tas_data->offload[port] == admin->enable)
+ if ((!!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_REPLACE) ||
+ (!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_DESTROY))
return -EINVAL;
- if (!admin->enable) {
+ if (admin->cmd == TAPRIO_CMD_DESTROY) {
taprio_offload_free(tas_data->offload[port]);
tas_data->offload[port] = NULL;
@@ -528,6 +529,8 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
return rc;
return sja1105_static_config_reload(priv, SJA1105_SCHEDULING);
+ } else if (admin->cmd != TAPRIO_CMD_REPLACE) {
+ return -EOPNOTSUPP;
}
/* The cycle time extension is the amount of time the last cycle from
@@ -772,9 +775,8 @@ static void sja1105_tas_state_machine(struct work_struct *work)
base_time_ts = ns_to_timespec64(base_time);
now_ts = ns_to_timespec64(now);
- dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n",
- base_time_ts.tv_sec, base_time_ts.tv_nsec,
- now_ts.tv_sec, now_ts.tv_nsec);
+ dev_dbg(ds->dev, "OPER base time %ptSp (now %ptSp)\n",
+ &base_time_ts, &now_ts);
break;
@@ -795,8 +797,7 @@ static void sja1105_tas_state_machine(struct work_struct *work)
if (now < tas_data->oper_base_time) {
/* TAS has not started yet */
diff = ns_to_timespec64(tas_data->oper_base_time - now);
- dev_dbg(ds->dev, "time to start: [%lld.%09ld]",
- diff.tv_sec, diff.tv_nsec);
+ dev_dbg(ds->dev, "time to start: [%ptSp]", &diff);
break;
}
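With the boolean admin->enable replaced by an explicit command, the validity check at the top of sja1105_setup_tc_taprio() implements this small matrix (a restatement, not driver code):

/*
 *  port state    TAPRIO_CMD_REPLACE  TAPRIO_CMD_DESTROY  any other cmd
 *  unconfigured  configure schedule  -EINVAL             -EOPNOTSUPP
 *  configured    -EINVAL             tear down schedule  -EOPNOTSUPP
 */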
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f5dca6a9b0f9..b7e95d60a6e4 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
return false;
}
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
@@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
+ /* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
- u16 vid = dsa_tag_8021q_rx_vid(dp);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
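sja1105_port_get_tag_8021q_vid() keys the virtual-link lookup on whichever tag_8021q VID the port currently uses. Illustrative values, assuming the dsa_tag_8021q_* helpers from net/dsa/tag_8021q.c:

/* A port in bridge_num 1 keys on dsa_tag_8021q_bridge_vid(1), shared
 * by all ports of that bridge; a standalone port keys on
 * dsa_tag_8021q_standalone_vid(dp), which encodes its switch id and
 * port index instead, hence the FIXME about bridge joins/leaves above.
 */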
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index a4b1447ff055..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -17,14 +17,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/dsa/8021q.h>
#include <linux/random.h>
#include <net/dsa.h>
@@ -34,11 +37,17 @@
#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */
#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */
-#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE 0x4 /* Subblocks 0-4, 6, 7 */
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+/* MII Block subblock */
+#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
+#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
+
#define CPU_PORT 6 /* CPU port */
+#define VSC73XX_NUM_FDB_ROWS 2048
+#define VSC73XX_NUM_BUCKETS 4
/* MAC Block registers */
#define VSC73XX_MAC_CFG 0x00
@@ -62,6 +71,8 @@
#define VSC73XX_CAT_DROP 0x6e
#define VSC73XX_CAT_PR_MISC_L2 0x6f
#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_CAT_VLAN_MISC 0x79
+#define VSC73XX_CAT_PORT_VLAN 0x7a
#define VSC73XX_Q_MISC_CONF 0xdf
/* MAC_CFG register bits */
@@ -122,6 +133,17 @@
#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+/* TXUPDCFG transmit modify setup bits */
+#define VSC73XX_TXUPDCFG_DSCP_REWR_MODE GENMASK(20, 19)
+#define VSC73XX_TXUPDCFG_DSCP_REWR_ENA BIT(18)
+#define VSC73XX_TXUPDCFG_TX_INT_TO_USRPRIO_ENA BIT(17)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID GENMASK(15, 4)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA BIT(3)
+#define VSC73XX_TXUPDCFG_TX_UPDATE_CRC_CPU_ENA BIT(1)
+#define VSC73XX_TXUPDCFG_TX_INSERT_TAG BIT(0)
+
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT 4
+
/* CAT_DROP categorizer frame dropping register bits */
#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
@@ -135,6 +157,15 @@
#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+/* CAT_VLAN_MISC categorizer VLAN miscellaneous bits */
+#define VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA BIT(8)
+#define VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA BIT(7)
+
+/* CAT_PORT_VLAN categorizer port VLAN */
+#define VSC73XX_CAT_PORT_VLAN_VLAN_CFI BIT(15)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_USR_PRIO GENMASK(14, 12)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_VID GENMASK(11, 0)
+
/* Frame analyzer block 2 registers */
#define VSC73XX_STORMLIMIT 0x02
#define VSC73XX_ADVLEARN 0x03
@@ -164,6 +195,44 @@
#define VSC73XX_AGENCTRL 0xf0
#define VSC73XX_CAPRST 0xff
+#define VSC73XX_SRCMASKS_CPU_COPY BIT(27)
+#define VSC73XX_SRCMASKS_MIRROR BIT(26)
+#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+
+#define VSC73XX_MACHDATA_VID GENMASK(27, 16)
+#define VSC73XX_MACHDATA_MAC0 GENMASK(15, 8)
+#define VSC73XX_MACHDATA_MAC1 GENMASK(7, 0)
+#define VSC73XX_MACLDATA_MAC2 GENMASK(31, 24)
+#define VSC73XX_MACLDATA_MAC3 GENMASK(23, 16)
+#define VSC73XX_MACLDATA_MAC4 GENMASK(15, 8)
+#define VSC73XX_MACLDATA_MAC5 GENMASK(7, 0)
+
+#define VSC73XX_HASH0_VID_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH0_MAC0_FROM_MASK GENMASK(7, 4)
+#define VSC73XX_HASH1_MAC0_FROM_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC1_FROM_MASK GENMASK(7, 1)
+#define VSC73XX_HASH2_MAC1_FROM_MASK BIT(0)
+#define VSC73XX_HASH2_MAC2_FROM_MASK GENMASK(7, 0)
+#define VSC73XX_HASH2_MAC3_FROM_MASK GENMASK(7, 6)
+#define VSC73XX_HASH3_MAC3_FROM_MASK GENMASK(5, 0)
+#define VSC73XX_HASH3_MAC4_FROM_MASK GENMASK(7, 3)
+#define VSC73XX_HASH4_MAC4_FROM_MASK GENMASK(2, 0)
+
+#define VSC73XX_HASH0_VID_TO_MASK GENMASK(9, 4)
+#define VSC73XX_HASH0_MAC0_TO_MASK GENMASK(3, 0)
+#define VSC73XX_HASH1_MAC0_TO_MASK GENMASK(10, 7)
+#define VSC73XX_HASH1_MAC1_TO_MASK GENMASK(6, 0)
+#define VSC73XX_HASH2_MAC1_TO_MASK BIT(10)
+#define VSC73XX_HASH2_MAC2_TO_MASK GENMASK(9, 2)
+#define VSC73XX_HASH2_MAC3_TO_MASK GENMASK(1, 0)
+#define VSC73XX_HASH3_MAC3_TO_MASK GENMASK(10, 5)
+#define VSC73XX_HASH3_MAC4_TO_MASK GENMASK(4, 0)
+#define VSC73XX_HASH4_MAC4_TO_MASK GENMASK(10, 8)
+
+#define VSC73XX_MACTINDX_SHADOW BIT(13)
+#define VSC73XX_MACTINDX_BUCKET_MSK GENMASK(12, 11)
+#define VSC73XX_MACTINDX_INDEX_MSK GENMASK(10, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
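The paired FROM/TO masks describe how VID and MAC bits scatter into the switch's 11-bit FDB hash. A hedged sketch of one field's relocation with <linux/bitfield.h>; the driver's full hash function is assumed to chain every FROM/TO pair the same way:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Move the low 6 VID bits into hash bits 9:4, per the
 * VSC73XX_HASH0_VID_FROM_MASK / _TO_MASK pair above.
 */
static u16 vsc73xx_hash0_vid_bits(u16 vid)
{
        return FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK,
                          FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid));
}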
@@ -185,16 +254,37 @@
#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
-#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(1, 0)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
-#define VSC73XX_MII_STAT 0x0
-#define VSC73XX_MII_CMD 0x1
-#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_MPRES 0x3
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+#define VSC73XX_MII_STAT_READ BIT(2)
+#define VSC73XX_MII_STAT_WRITE BIT(1)
+
+#define VSC73XX_MII_CMD_SCAN BIT(27)
+#define VSC73XX_MII_CMD_OPERATION BIT(26)
+#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21)
+#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16)
+#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_DATA_FAILURE BIT(16)
+#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6)
+#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0)
+#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */
+
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
@@ -269,11 +359,22 @@
#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_MDIO_POLL_SLEEP_US 5
+#define VSC73XX_POLL_TIMEOUT_US 10000
+
struct vsc73xx_counter {
u8 counter;
const char *name;
};
+struct vsc73xx_fdb {
+ u16 vid;
+ u8 port;
+ u8 mac[ETH_ALEN];
+ bool valid;
+};
+
/* Counters are named according to the MIB standards where applicable.
* Some counters are custom, non-standard. The standard counters are
* named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
@@ -340,6 +441,17 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
+struct vsc73xx_vlan_summary {
+ size_t num_tagged;
+ size_t num_untagged;
+};
+
+enum vsc73xx_port_vlan_conf {
+ VSC73XX_VLAN_FILTER,
+ VSC73XX_VLAN_FILTER_UNTAG_ALL,
+ VSC73XX_VLAN_IGNORE,
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
@@ -360,13 +472,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
break;
case VSC73XX_BLOCK_MII:
- case VSC73XX_BLOCK_CAPTURE:
case VSC73XX_BLOCK_ARBITER:
switch (subblock) {
case 0 ... 1:
return 1;
}
break;
+ case VSC73XX_BLOCK_CAPTURE:
+ switch (subblock) {
+ case 0 ... 4:
+ case 6 ... 7:
+ return 1;
+ }
+ break;
}
return 0;
@@ -484,6 +602,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
+static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
+ VSC73XX_MDIO_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false, vsc,
+ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_STAT, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@@ -491,21 +625,33 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
/* Setting bit 26 means "read" */
- cmd = BIT(26) | (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = VSC73XX_MII_CMD_OPERATION |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
- msleep(2);
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_DATA, &val);
if (ret)
return ret;
- if (val & BIT(16)) {
+ if (val & VSC73XX_MII_DATA_FAILURE) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
- val &= 0xFFFFU;
+ val &= VSC73XX_MII_DATA_READ_DATA;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
@@ -520,19 +666,15 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
- /* It was found through tedious experiments that this router
- * chip really hates to have it's PHYs reset. They
- * never recover if that happens: autonegotiation stops
- * working after a reset. Just filter out this command.
- * (Resetting the whole chip is OK.)
- */
- if (regnum == 0 && (val & BIT(15))) {
- dev_info(vsc->dev, "reset PHY - disallowed\n");
- return 0;
- }
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
- cmd = (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) |
+ FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -554,16 +696,164 @@ static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
* cannot access the tag. (See "Internal frame header" section
* 3.9.1 in the manual.)
*/
- return DSA_TAG_PROTO_NONE;
+ return DSA_TAG_PROTO_VSC73XX_8021Q;
+}
+
+static int vsc73xx_wait_for_vlan_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK) ==
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_VLANACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int
+vsc73xx_read_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 *portmap)
+{
+ u32 val;
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, &val);
+ *portmap = (val & VSC73XX_VLANACCESS_VLAN_PORT_MASK) >>
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT;
+
+ return 0;
+}
+
+static int
+vsc73xx_write_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 portmap)
+{
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ (portmap << VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT));
+
+ return vsc73xx_wait_for_vlan_table_cmd(vsc);
+}
+
+static int
+vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u8 portmap;
+ int ret;
+
+ ret = vsc73xx_read_vlan_table_entry(vsc, vid, &portmap);
+ if (ret)
+ return ret;
+
+ if (set)
+ portmap |= BIT(port);
+ else
+ portmap &= ~BIT(port);
+
+ return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
+}
+
+static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds)
+{
+	/* Keep 2.0 ns delay for backward compatibility */
+ u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
+ u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS;
+ struct dsa_port *dp = dsa_to_port(ds, CPU_PORT);
+ struct device_node *port_dn = dp->dn;
+ struct vsc73xx *vsc = ds->priv;
+ u32 delay;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
+ break;
+ case 1400:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
+ break;
+ case 1700:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Transmit Clock Delay\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE;
+ break;
+ case 1400:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS;
+ break;
+ case 1700:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Receive Clock Delay value\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ /* MII delay, set both GTX and RX delay */
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ tx_delay | rx_delay);
}
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i;
+ int i, ret, val;
dev_info(vsc->dev, "set up the switch\n");
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+ ds->fdb_isolation = true;
+
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_MASTER_RESET);
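Every VLAN table access above rides the same handshake: wait for the VLANACCESS command field to return to idle, latch the VID into VLANTIDX, issue the command, then wait for idle again. Usage sketch for the read-modify-write helper (port and VID are illustrative):

/* Join port 2 to VID 100: reads the current 8-bit portmap, sets
 * BIT(2), and writes the entry back behind the idle handshake.
 */
static int vsc73xx_example_vlan_join(struct vsc73xx *vsc)
{
        return vsc73xx_update_vlan_table(vsc, 2, 100, true);
}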
@@ -591,7 +881,7 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MACACCESS,
VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
- /* Clear VLAN table */
+ /* Set VLAN table to default values */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_VLANACCESS,
VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
@@ -616,26 +906,53 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
- /* MII delay, set both GTX and RX delay to 2 ns */
- vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
- VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
- VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
- /* Enable reception of frames on all ports */
- vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
- 0x5f);
+ /* Configure RGMII delay */
+ ret = vsc73xx_configure_rgmii_port_delay(ds);
+ if (ret)
+ return ret;
+
+	/* Ingress VLAN reception mask (table 145) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
+ 0xff);
/* IP multicast flood mask (table 144) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
0xff);
mdelay(50);
+ /* Disable preamble and use maximum allowed clock for the internal
+ * mdio bus, used for communication with internal PHYs only.
+ */
+ val = VSC73XX_MII_MPRES_NOPREAMBLE |
+ FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL,
+ VSC73XX_MII_PRESCALEVAL_MIN);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_MPRES, val);
+
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
udelay(4);
- return 0;
+ /* Clear VLAN table */
+ for (i = 0; i < VLAN_N_VID; i++)
+ vsc73xx_write_vlan_table_entry(vsc, i, 0);
+
+ INIT_LIST_HEAD(&vsc->vlans);
+
+ rtnl_lock();
+ ret = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+
+ return ret;
+}
+
+static void vsc73xx_teardown(struct dsa_switch *ds)
+{
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
}
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
@@ -714,22 +1031,123 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
port, VSC73XX_C_RX0, 0);
}
-static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
- int port, struct phy_device *phydev,
- u32 initval)
+static void vsc73xx_reset_port(struct vsc73xx *vsc, int port, u32 initval)
+{
+ int ret, err;
+ u32 val;
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || (val & BIT(port)),
+ VSC73XX_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false,
+ vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (ret)
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ else if (err < 0)
+ dev_err(vsc->dev, "error reading arbiter\n");
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | initval);
+}
+
+static void vsc73xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- u32 val = initval;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+}
+
+static void vsc73xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+}
+
+static void vsc73xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+ u32 val;
u8 seed;
- /* Reset this port FIXME: break out subroutine */
- val |= VSC73XX_MAC_CFG_RESET;
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+ if (speed == SPEED_1000)
+ val = VSC73XX_MAC_CFG_GIGA_MODE | VSC73XX_MAC_CFG_TX_IPG_1000M;
+ else
+ val = VSC73XX_MAC_CFG_TX_IPG_100_10M;
+
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= VSC73XX_MAC_CFG_CLK_SEL_1000M;
+ else
+ val |= VSC73XX_MAC_CFG_CLK_SEL_EXT;
+
+ if (duplex == DUPLEX_FULL)
+ val |= VSC73XX_MAC_CFG_FDX;
+ else
+ /* In the datasheet description ("Port Mode Procedure" in 5.6.2)
+ * this bit is configured only for half duplex.
+ */
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, val);
/* Seed the port randomness with randomness */
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* These bits only affect the maximum frame length. The kernel takes
+ * care of the MTU, so enable the +8 byte frame length unconditionally.
+ */
+ val |= VSC73XX_MAC_CFG_VLAN_AWR | VSC73XX_MAC_CFG_VLAN_DBLAWR;
+
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Flow control for the PHY facing ports:
@@ -742,6 +1160,10 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_FCCONF_FLOW_CTRL_OBEY |
0xff);
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
/* Disallow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), 0);
@@ -754,125 +1176,255 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
-static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static bool vsc73xx_tag_8021q_active(struct dsa_port *dp)
{
- struct vsc73xx *vsc = ds->priv;
- u32 val;
+ return !dsa_port_is_vlan_filtering(dp);
+}
- /* Special handling of the CPU-facing port */
- if (port == CPU_PORT) {
- /* Other ports are already initialized but not this one */
- vsc73xx_init_port(vsc, CPU_PORT);
- /* Select the external port for this interface (EXT_PORT)
- * Enable the GMII GTX external clock
- * Use double data rate (DDR mode)
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
- CPU_PORT,
- VSC73XX_ADVPORTM,
- VSC73XX_ADVPORTM_EXT_PORT |
- VSC73XX_ADVPORTM_ENA_GTX |
- VSC73XX_ADVPORTM_DDR_MODE);
+static struct vsc73xx_bridge_vlan *
+vsc73xx_bridge_vlan_find(struct vsc73xx *vsc, u16 vid)
+{
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static void
+vsc73xx_bridge_vlan_remove_port(struct vsc73xx_bridge_vlan *vsc73xx_vlan,
+ int port)
+{
+ vsc73xx_vlan->portmask &= ~BIT(port);
+
+ if (vsc73xx_vlan->portmask)
+ return;
+
+ list_del(&vsc73xx_vlan->list);
+ kfree(vsc73xx_vlan);
+}
+
+static void vsc73xx_bridge_vlan_summary(struct vsc73xx *vsc, int port,
+ struct vsc73xx_vlan_summary *summary,
+ u16 ignored_vid)
+{
+ size_t num_tagged = 0, num_untagged = 0;
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list) {
+ if (!(vlan->portmask & BIT(port)) || vlan->vid == ignored_vid)
+ continue;
+
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ else
+ num_tagged++;
}
- /* This is the MAC confiuration that always need to happen
- * after a PHY or the CPU port comes up or down.
- */
- if (!phydev->link) {
- int maxloop = 10;
-
- dev_dbg(vsc->dev, "port %d: went down\n",
- port);
-
- /* Disable RX on this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RX_EN, 0);
-
- /* Discard packets */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), BIT(port));
-
- /* Wait until queue is empty */
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- while (!(val & BIT(port))) {
- msleep(1);
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- if (--maxloop == 0) {
- dev_err(vsc->dev,
- "timeout waiting for block arbiter\n");
- /* Continue anyway */
- break;
- }
- }
+ summary->num_untagged = num_untagged;
+ summary->num_tagged = num_tagged;
+}
- /* Put this port into reset */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RESET);
+static u16 vsc73xx_find_first_vlan_untagged(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_bridge_vlan *vlan;
- /* Accept packets again */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), 0);
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if ((vlan->portmask & BIT(port)) &&
+ (vlan->untagged & BIT(port)))
+ return vlan->vid;
- /* Allow backward dropping of frames from this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+ return VLAN_N_VID;
+}
- /* Receive mask (disable forwarding) */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), 0);
+static int vsc73xx_set_vlan_conf(struct vsc73xx *vsc, int port,
+ enum vsc73xx_port_vlan_conf port_vlan_conf)
+{
+ u32 val = 0;
+ int ret;
- return;
+ if (port_vlan_conf == VSC73XX_VLAN_IGNORE)
+ val = VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_VLAN_MISC,
+ VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA, val);
+ if (ret)
+ return ret;
+
+ val = (port_vlan_conf == VSC73XX_VLAN_FILTER) ?
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_conf - Update VLAN configuration of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the VLAN behavior of a port to make sure that when it is under
+ * a VLAN filtering bridge, the port is either filtering with tag
+ * preservation, or filtering with all VLANs egress-untagged. Otherwise,
+ * the port ignores VLAN tags from packets and applies the port-based
+ * VID.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_conf(struct vsc73xx *vsc, int port)
+{
+ enum vsc73xx_port_vlan_conf port_vlan_conf = VSC73XX_VLAN_IGNORE;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+
+ if (port == CPU_PORT) {
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+ } else if (dsa_port_is_vlan_filtering(dp)) {
+ struct vsc73xx_vlan_summary summary;
+
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+ if (summary.num_tagged == 0)
+ port_vlan_conf = VSC73XX_VLAN_FILTER_UNTAG_ALL;
}
- /* Figure out what speed was negotiated */
- if (phydev->speed == SPEED_1000) {
- dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
- port);
+ return vsc73xx_set_vlan_conf(vsc, port, port_vlan_conf);
+}
- /* Set up default for internal port or external RGMII */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- val = VSC73XX_MAC_CFG_1000M_F_RGMII;
- else
- val = VSC73XX_MAC_CFG_1000M_F_PHY;
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_100) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_10) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else {
- dev_err(vsc->dev,
- "could not adjust link: unknown speed\n");
+static int
+vsc73xx_vlan_change_untagged(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+
+ if (set)
+ val = VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ ((vid << VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT) &
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_untagged - Update native VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the native VLAN of a port (the one VLAN which is transmitted
+ * as egress-untagged on a trunk port) when the port is in VLAN filtering
+ * mode and only one untagged VID is configured.
+ * In other cases there is no need to configure it, because the switch can
+ * untag all VLANs on the port.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_untagged(struct vsc73xx *vsc, int port)
+{
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ struct vsc73xx_vlan_summary summary;
+ u16 vid = 0;
+ bool valid;
+
+ if (!dsa_port_is_vlan_filtering(dp))
+ /* The port is configured to untag all VLANs in that case.
+ * No need to commit an untagged config change.
+ */
+ return 0;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+
+ if (summary.num_untagged > 1)
+ /* The port must untag all VLANs in that case.
+ * No need to commit an untagged config change.
+ */
+ return 0;
+
+ valid = (summary.num_untagged == 1);
+ if (valid)
+ vid = vsc73xx_find_first_vlan_untagged(vsc, port);
+
+ return vsc73xx_vlan_change_untagged(vsc, port, vid, valid);
+}
+
+static int
+vsc73xx_vlan_change_pvid(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+ int ret;
+
+ val = set ? 0 : VSC73XX_CAT_DROP_UNTAGGED_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_UNTAGGED_ENA, val);
+ if (!set || ret)
+ return ret;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_PORT_VLAN,
+ VSC73XX_CAT_PORT_VLAN_VLAN_VID,
+ vid & VSC73XX_CAT_PORT_VLAN_VLAN_VID);
+}
+
+/**
+ * vsc73xx_vlan_commit_pvid - Update port-based default VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the PVID of a port so that it follows either the bridge PVID
+ * configuration, when the bridge is currently VLAN-aware, or the PVID
+ * from tag_8021q, when the port is standalone or under a VLAN-unaware
+ * bridge. A port with no PVID drops all untagged and VID 0 tagged
+ * traffic.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_pvid(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_portinfo *portinfo = &vsc->portinfo[port];
+ bool valid = portinfo->pvid_tag_8021q_configured;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ u16 vid = portinfo->pvid_tag_8021q;
+
+ if (dsa_port_is_vlan_filtering(dp)) {
+ vid = portinfo->pvid_vlan_filtering;
+ valid = portinfo->pvid_vlan_filtering_configured;
}
- /* Enable port (forwarding) in the receieve mask */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), BIT(port));
+ return vsc73xx_vlan_change_pvid(vsc, port, vid, valid);
+}
+
+static int vsc73xx_vlan_commit_settings(struct vsc73xx *vsc, int port)
+{
+ int ret;
+
+ ret = vsc73xx_vlan_commit_untagged(vsc, port);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_vlan_commit_pvid(vsc, port);
+ if (ret)
+ return ret;
+
+ return vsc73xx_vlan_commit_conf(vsc, port);
}
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
@@ -883,7 +1435,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
dev_info(vsc->dev, "enable port %d\n", port);
vsc73xx_init_port(vsc, port);
- return 0;
+ return vsc73xx_vlan_commit_settings(vsc, port);
}
static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
@@ -929,7 +1481,8 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
const struct vsc73xx_counter *cnt;
struct vsc73xx *vsc = ds->priv;
u8 indices[6];
- int i, j;
+ u8 *buf = data;
+ int i;
u32 val;
int ret;
@@ -949,10 +1502,7 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
/* The first counters is the RX octets */
- j = 0;
- strncpy(data + j * ETH_GSTRING_LEN,
- "RxEtherStatsOctets", ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&buf, "RxEtherStatsOctets");
/* Each port supports recording 3 RX counters and 3 TX counters,
* figure out what counters we use in this set-up and return the
@@ -962,23 +1512,16 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
*/
for (i = 0; i < 3; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], false);
- if (cnt)
- strncpy(data + j * ETH_GSTRING_LEN,
- cnt->name, ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&buf, cnt ? cnt->name : "");
}
/* TX stats begins with the number of TX octets */
- strncpy(data + j * ETH_GSTRING_LEN,
- "TxEtherStatsOctets", ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&buf, "TxEtherStatsOctets");
for (i = 3; i < 6; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], true);
- if (cnt)
- strncpy(data + j * ETH_GSTRING_LEN,
- cnt->name, ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&buf, cnt ? cnt->name : "");
}
}
@@ -1025,32 +1568,680 @@ static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct vsc73xx *vsc = ds->priv;
return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAXLEN, new_mtu);
+ VSC73XX_MAXLEN, new_mtu + ETH_HLEN + ETH_FCS_LEN);
}
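+
+/* Note: VSC73XX_MAXLEN counts the whole frame on the wire, while the DSA core
+ * MTU excludes the Ethernet header and FCS, hence the ETH_HLEN + ETH_FCS_LEN
+ * adjustments here and in vsc73xx_get_max_mtu() below.
+ */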
 /* According to application note "VSC7398 Jumbo Frames", setting
- * up the MTU to 9.6 KB does not affect the performance on standard
+ * up the frame size to 9.6 KB does not affect the performance on standard
* frames. It is clear from the application note that
* "9.6 kilobytes" == 9600 bytes.
*/
static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
{
- return 9600;
+ return 9600 - ETH_HLEN - ETH_FCS_LEN;
+}
+
+static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
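+ /* Port 5 is the unused "hole" in the port map (see the comment in
+ * vitesse-vsc73xx.h); it is not a usable port, so expose no
+ * capabilities for it.
+ */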
+ if (port == 5)
+ return;
+
+ if (port == CPU_PORT) {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, interfaces);
+ }
+
+ if (port <= 4) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
+}
+
+static int
+vsc73xx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering, struct netlink_ext_ack *extack)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* The commit to hardware processed below is required because vsc73xx
+ * uses tag_8021q. When vlan_filtering is disabled, tag_8021q uses
+ * pvid/untagged VLANs for port recognition. The values configured for
+ * VLANs and the pvid/untagged states are stored in the portinfo
+ * structure. When vlan_filtering is enabled, the pvid/untagged state
+ * must be restored from the portinfo structure. The analogous routine
+ * runs when vlan_filtering is disabled, but then the values used by
+ * tag_8021q are restored.
+ */
+
+ return vsc73xx_vlan_commit_settings(vsc, port);
+}
+
+static int vsc73xx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_vlan_summary summary;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret = 0;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* The processed vlan->vid is excluded from the search because the VLAN
+ * can be re-added with a different set of flags, so it's easiest to
+ * ignore its old flags from the VLAN database software copy.
+ */
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, vlan->vid);
+
+ /* VSC73XX allows only three untagged states: none, one or all */
+ if ((untagged && summary.num_tagged > 0 && summary.num_untagged > 0) ||
+ (!untagged && summary.num_untagged > 1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port can have only none, one or all untagged VLANs");
+ return -EBUSY;
+ }
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (!vsc73xx_vlan) {
+ vsc73xx_vlan = kzalloc(sizeof(*vsc73xx_vlan), GFP_KERNEL);
+ if (!vsc73xx_vlan)
+ return -ENOMEM;
+
+ vsc73xx_vlan->vid = vlan->vid;
+
+ list_add_tail(&vsc73xx_vlan->list, &vsc->vlans);
+ }
+
+ vsc73xx_vlan->portmask |= BIT(port);
+
+ /* CPU port must be always tagged because source port identification is
+ * based on tag_8021q.
+ */
+ if (port == CPU_PORT)
+ goto update_vlan_table;
+
+ if (untagged)
+ vsc73xx_vlan->untagged |= BIT(port);
+ else
+ vsc73xx_vlan->untagged &= ~BIT(port);
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_vlan_filtering_configured = true;
+ portinfo->pvid_vlan_filtering = vlan->vid;
+ } else if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid) {
+ portinfo->pvid_vlan_filtering_configured = false;
+ }
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ goto err;
+ }
+
+update_vlan_table:
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, true);
+ if (!ret)
+ return 0;
+err:
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+ return ret;
+}
+
+static int vsc73xx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, false);
+ if (ret)
+ return ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid)
+ portinfo->pvid_vlan_filtering_configured = false;
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (vsc73xx_vlan)
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+
+ if (commit_to_hardware)
+ return vsc73xx_vlan_commit_settings(vsc, port);
+
+ return 0;
+}
+
+static int vsc73xx_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_tag_8021q_configured = true;
+ portinfo->pvid_tag_8021q = vid;
+ }
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ return ret;
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, true);
}
+static int vsc73xx_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_tag_8021q_configured &&
+ portinfo->pvid_tag_8021q == vid) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ bool commit_to_hardware;
+ int err;
+
+ portinfo->pvid_tag_8021q_configured = false;
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ err = vsc73xx_vlan_commit_settings(vsc, port);
+ if (err)
+ return err;
+ }
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, false);
+}
+
+static int vsc73xx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc73xx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_LEARNING) {
+ u32 val = flags.val & BR_LEARNING ? BIT(port) : 0;
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
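+/* Note: VSC73XX_SRCMASKS + n appears to hold the set of ports that frames
+ * received on port n may be forwarded to, which is why both directions of
+ * each port pair are updated below.
+ */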
+static void vsc73xx_refresh_fwd_map(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *other_dp, *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u16 mask;
+
+ if (state != BR_STATE_FORWARDING) {
+ /* Ports that aren't in the forwarding state must not
+ * forward packets anywhere.
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, 0);
+
+ dsa_switch_for_each_available_port(other_dp, ds) {
+ if (other_dp == dp)
+ continue;
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_dp->index,
+ BIT(port), 0);
+ }
+
+ return;
+ }
+
+ /* Forwarding ports must forward to the CPU and to other ports
+ * in the same bridge
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + CPU_PORT, BIT(port), BIT(port));
+
+ mask = BIT(CPU_PORT);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (port == other_port || !dsa_port_bridge_same(dp, other_dp) ||
+ other_dp->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ mask |= BIT(other_port);
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_port,
+ BIT(port), BIT(port));
+ }
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, mask);
+}
+
+/* FIXME: STP frames aren't forwarded at the moment. BPDU frames are
+ * forwarded only from and to the PI/SI interface. For more info see chapter
+ * 2.7.1 (CPU Forwarding) in the datasheet.
+ * This function is required for tag_8021q operations.
+ */
+static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u32 val = 0;
+
+ if (state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING)
+ val = dp->learning ? BIT(port) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+
+ val = (state == BR_STATE_BLOCKING || state == BR_STATE_DISABLED) ?
+ 0 : BIT(port);
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), val);
+
+ /* CPU Port should always forward packets when user ports are forwarding
+ * so let's configure it from other ports only.
+ */
+ if (port != CPU_PORT)
+ vsc73xx_refresh_fwd_map(ds, port, state);
+}
+
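+/* Note: judging by the slice masks, the hardware indexes the MAC table with
+ * a hash built by XOR-folding VID[5:0] and the 48 MAC address bits in
+ * roughly 11-bit slices; each FIELD_GET/FIELD_PREP pair below moves one
+ * slice into position before it is folded in.
+ */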
+static u16 vsc73xx_calc_hash(const unsigned char *addr, u16 vid)
+{
+ /* VID 5-0, MAC 47-44 */
+ u16 hash = FIELD_PREP(VSC73XX_HASH0_VID_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_VID_FROM_MASK, vid)) |
+ FIELD_PREP(VSC73XX_HASH0_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH0_MAC0_FROM_MASK, addr[0]));
+ /* MAC 43-33 */
+ hash ^= FIELD_PREP(VSC73XX_HASH1_MAC0_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC0_FROM_MASK, addr[0])) |
+ FIELD_PREP(VSC73XX_HASH1_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH1_MAC1_FROM_MASK, addr[1]));
+ /* MAC 32-22 */
+ hash ^= FIELD_PREP(VSC73XX_HASH2_MAC1_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC1_FROM_MASK, addr[1])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC2_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC2_FROM_MASK, addr[2])) |
+ FIELD_PREP(VSC73XX_HASH2_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH2_MAC3_FROM_MASK, addr[3]));
+ /* MAC 21-11 */
+ hash ^= FIELD_PREP(VSC73XX_HASH3_MAC3_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC3_FROM_MASK, addr[3])) |
+ FIELD_PREP(VSC73XX_HASH3_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH3_MAC4_FROM_MASK, addr[4]));
+ /* MAC 10-0 */
+ hash ^= FIELD_PREP(VSC73XX_HASH4_MAC4_TO_MASK,
+ FIELD_GET(VSC73XX_HASH4_MAC4_FROM_MASK, addr[4])) |
+ addr[5];
+
+ return hash;
+}
+
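+/* Note: read_poll_timeout() reports a timeout through its own return value,
+ * while the result of the vsc73xx_read() callback is captured in @err, so
+ * both failure paths are propagated separately below.
+ */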
+static int
+vsc73xx_port_wait_for_mac_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_MACACCESS_CMD_MASK) ==
+ VSC73XX_MACACCESS_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int vsc73xx_port_read_mac_table_row(struct vsc73xx *vsc, u16 index,
+ struct vsc73xx_fdb *fdb)
+{
+ int ret, i;
+ u32 val;
+
+ if (!fdb)
+ return -EINVAL;
+ if (index >= VSC73XX_NUM_FDB_ROWS)
+ return -EINVAL;
+
+ for (i = 0; i < VSC73XX_NUM_BUCKETS; i++) {
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACTINDX,
+ (i ? 0 : VSC73XX_MACTINDX_SHADOW) |
+ FIELD_PREP(VSC73XX_MACTINDX_BUCKET_MSK, i) |
+ index);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_READ_ENTRY);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].valid = FIELD_GET(VSC73XX_MACACCESS_VALID, val);
+ if (!fdb[i].valid)
+ continue;
+
+ fdb[i].port = FIELD_GET(VSC73XX_MACACCESS_DEST_IDX_MASK, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACHDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].vid = FIELD_GET(VSC73XX_MACHDATA_VID, val);
+ fdb[i].mac[0] = FIELD_GET(VSC73XX_MACHDATA_MAC0, val);
+ fdb[i].mac[1] = FIELD_GET(VSC73XX_MACHDATA_MAC1, val);
+
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACLDATA, &val);
+ if (ret)
+ return ret;
+
+ fdb[i].mac[2] = FIELD_GET(VSC73XX_MACLDATA_MAC2, val);
+ fdb[i].mac[3] = FIELD_GET(VSC73XX_MACLDATA_MAC3, val);
+ fdb[i].mac[4] = FIELD_GET(VSC73XX_MACLDATA_MAC4, val);
+ fdb[i].mac[5] = FIELD_GET(VSC73XX_MACLDATA_MAC5, val);
+ }
+
+ return ret;
+}
+
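+/* Common FDB command sequence: load the address and VID into
+ * MACHDATA/MACLDATA, point MACTINDX at the hashed row, then issue the
+ * requested command (e.g. LEARN or FORGET) through MACACCESS and wait for
+ * the block to become idle again.
+ */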
+static int
+vsc73xx_fdb_operation(struct vsc73xx *vsc, const unsigned char *addr, u16 vid,
+ u16 hash, u16 cmd_mask, u16 cmd_val)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(VSC73XX_MACHDATA_VID, vid) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC0, addr[0]) |
+ FIELD_PREP(VSC73XX_MACHDATA_MAC1, addr[1]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACHDATA,
+ val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(VSC73XX_MACLDATA_MAC2, addr[2]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC3, addr[3]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC4, addr[4]) |
+ FIELD_PREP(VSC73XX_MACLDATA_MAC5, addr[5]);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACLDATA,
+ val);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_MACTINDX,
+ hash);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_port_wait_for_mac_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_MACACCESS, cmd_mask, cmd_val);
+ if (ret)
+ return ret;
+
+ return vsc73xx_port_wait_for_mac_table_cmd(vsc);
+}
+
+static int vsc73xx_fdb_del_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (fdb[bucket].valid && fdb[bucket].port == port &&
+ ether_addr_equal(addr, fdb[bucket].mac))
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Can't find MAC in MAC table */
+ ret = -ENODATA;
+ goto err;
+ }
+
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_CMD_MASK,
+ VSC73XX_MACACCESS_CMD_FORGET);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add_entry(struct vsc73xx *vsc, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ u16 hash = vsc73xx_calc_hash(addr, vid);
+ int bucket, ret;
+ u32 val;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ ret = vsc73xx_port_read_mac_table_row(vsc, hash, fdb);
+ if (ret)
+ goto err;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid)
+ break;
+ }
+
+ if (bucket == VSC73XX_NUM_BUCKETS) {
+ /* Bucket is full */
+ ret = -EOVERFLOW;
+ goto err;
+ }
+
+ val = VSC73XX_MACACCESS_VALID | VSC73XX_MACACCESS_LOCKED |
+ FIELD_PREP(VSC73XX_MACACCESS_DEST_IDX_MASK, port) |
+ VSC73XX_MACACCESS_CMD_LEARN;
+ ret = vsc73xx_fdb_operation(vsc, addr, vid, hash,
+ VSC73XX_MACACCESS_VALID |
+ VSC73XX_MACACCESS_LOCKED |
+ VSC73XX_MACACCESS_DEST_IDX_MASK |
+ VSC73XX_MACACCESS_CMD_MASK, val);
+err:
+ mutex_unlock(&vsc->fdb_lock);
+ return ret;
+}
+
+static int vsc73xx_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_add_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return vsc73xx_fdb_del_entry(vsc, port, addr, vid);
+}
+
+static int vsc73xx_port_fdb_dump(struct dsa_switch *ds,
+ int port, dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct vsc73xx_fdb fdb[VSC73XX_NUM_BUCKETS];
+ struct vsc73xx *vsc = ds->priv;
+ u16 i, bucket;
+ int err = 0;
+
+ mutex_lock(&vsc->fdb_lock);
+
+ for (i = 0; i < VSC73XX_NUM_FDB_ROWS; i++) {
+ err = vsc73xx_port_read_mac_table_row(vsc, i, fdb);
+ if (err)
+ goto unlock;
+
+ for (bucket = 0; bucket < VSC73XX_NUM_BUCKETS; bucket++) {
+ if (!fdb[bucket].valid || fdb[bucket].port != port)
+ continue;
+
+ /* We need to hide dsa_8021q VLANs from the user */
+ if (vid_is_dsa_8021q(fdb[bucket].vid))
+ fdb[bucket].vid = 0;
+
+ err = cb(fdb[bucket].mac, fdb[bucket].vid, false, data);
+ if (err)
+ goto unlock;
+ }
+ }
+unlock:
+ mutex_unlock(&vsc->fdb_lock);
+ return err;
+}
+
+static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
+ .mac_config = vsc73xx_mac_config,
+ .mac_link_down = vsc73xx_mac_link_down,
+ .mac_link_up = vsc73xx_mac_link_up,
+};
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
+ .teardown = vsc73xx_teardown,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
- .adjust_link = vsc73xx_adjust_link,
.get_strings = vsc73xx_get_strings,
.get_ethtool_stats = vsc73xx_get_ethtool_stats,
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_pre_bridge_flags = vsc73xx_port_pre_bridge_flags,
+ .port_bridge_flags = vsc73xx_port_bridge_flags,
+ .port_bridge_join = dsa_tag_8021q_bridge_join,
+ .port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
+ .port_fdb_add = vsc73xx_fdb_add,
+ .port_fdb_del = vsc73xx_fdb_del,
+ .port_fdb_dump = vsc73xx_port_fdb_dump,
.port_max_mtu = vsc73xx_get_max_mtu,
+ .port_stp_state_set = vsc73xx_port_stp_state_set,
+ .port_vlan_filtering = vsc73xx_port_vlan_filtering,
+ .port_vlan_add = vsc73xx_port_vlan_add,
+ .port_vlan_del = vsc73xx_port_vlan_del,
+ .phylink_get_caps = vsc73xx_phylink_get_caps,
+ .tag_8021q_vlan_add = vsc73xx_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = vsc73xx_tag_8021q_vlan_del,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1067,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_GPIO, BIT(offset), tmp);
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
@@ -1119,12 +2310,11 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
vsc->chipid);
+ if (!vsc->gc.label)
+ return -ENOMEM;
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
-#if IS_ENABLED(CONFIG_OF_GPIO)
- vsc->gc.of_node = vsc->dev->of_node;
-#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
@@ -1174,32 +2364,24 @@ int vsc73xx_probe(struct vsc73xx *vsc)
return -ENODEV;
}
+ mutex_init(&vsc->fdb_lock);
+
eth_random_addr(vsc->addr);
dev_info(vsc->dev,
"MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
vsc->addr[0], vsc->addr[1], vsc->addr[2],
vsc->addr[3], vsc->addr[4], vsc->addr[5]);
- /* The VSC7395 switch chips have 5+1 ports which means 5
- * ordinary ports and a sixth CPU port facing the processor
- * with an RGMII interface. These ports are numbered 0..4
- * and 6, so they leave a "hole" in the port map for port 5,
- * which is invalid.
- *
- * The VSC7398 has 8 ports, port 7 is again the CPU port.
- *
- * We allocate 8 ports and avoid access to the nonexistant
- * ports.
- */
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
- vsc->ds->num_ports = 8;
+ vsc->ds->num_ports = VSC73XX_MAX_NUM_PORTS;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
+ vsc->ds->phylink_mac_ops = &vsc73xx_phylink_mac_ops;
ret = dsa_register_switch(vsc->ds);
if (ret) {
dev_err(dev, "unable to register switch (%d)\n", ret);
@@ -1216,12 +2398,10 @@ int vsc73xx_probe(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_probe);
-int vsc73xx_remove(struct vsc73xx *vsc)
+void vsc73xx_remove(struct vsc73xx *vsc)
{
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
-
- return 0;
}
EXPORT_SYMBOL(vsc73xx_remove);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..7a2e0a619b85 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -112,18 +112,14 @@ static int vsc73xx_platform_probe(struct platform_device *pdev)
return vsc73xx_probe(&vsc_platform->vsc);
}
-static int vsc73xx_platform_remove(struct platform_device *pdev)
+static void vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
if (!vsc_platform)
- return 0;
+ return;
vsc73xx_remove(&vsc_platform->vsc);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void vsc73xx_platform_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 645398901e05..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -159,18 +159,14 @@ static int vsc73xx_spi_probe(struct spi_device *spi)
return vsc73xx_probe(&vsc_spi->vsc);
}
-static int vsc73xx_spi_remove(struct spi_device *spi)
+static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
- return 0;
+ return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
@@ -207,10 +203,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b951504e65..3c30e143c14f 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -3,8 +3,49 @@
#include <linux/etherdevice.h>
#include <linux/gpio/driver.h>
+/* The VSC7395 switch chips have 5+1 ports which means 5 ordinary ports and
+ * a sixth CPU port facing the processor with an RGMII interface. These ports
+ * are numbered 0..4 and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent ports.
+ */
+#define VSC73XX_MAX_NUM_PORTS 8
+
+/**
+ * struct vsc73xx_portinfo - port data structure: contains storage data
+ * @pvid_vlan_filtering: PVID VLAN number used in VLAN filtering mode
+ * @pvid_tag_8021q: PVID VLAN number used in tag_8021q mode
+ * @pvid_vlan_filtering_configured: informs if the port has a configured PVID
+ * in VLAN filtering mode
+ * @pvid_tag_8021q_configured: informs if the port has a configured PVID in
+ * tag_8021q mode
+ */
+struct vsc73xx_portinfo {
+ u16 pvid_vlan_filtering;
+ u16 pvid_tag_8021q;
+ bool pvid_vlan_filtering_configured;
+ bool pvid_tag_8021q_configured;
+};
+
/**
- * struct vsc73xx - VSC73xx state container
+ * struct vsc73xx - VSC73xx state container: main data structure
+ * @dev: The device pointer
+ * @reset: The descriptor for the GPIO line tied to the reset pin
+ * @ds: Pointer to the DSA core structure
+ * @gc: Main structure of the GPIO controller
+ * @chipid: Storage for the Chip ID value read from the CHIPID register of the
+ * switch
+ * @addr: MAC address used in flow control frames
+ * @ops: Structure with hardware-dependent operations
+ * @priv: Pointer to the configuration interface structure
+ * @portinfo: Storage table of portinfo structures
+ * @vlans: List of configured VLANs. Contains the port mask and untagged status
+ * of every VLAN configured by the port VLAN operations. It doesn't cover
+ * tag_8021q VLANs.
+ * @fdb_lock: Mutex protecting FDB access
*/
struct vsc73xx {
struct device *dev;
@@ -15,8 +56,16 @@ struct vsc73xx {
u8 addr[ETH_ALEN];
const struct vsc73xx_ops *ops;
void *priv;
+ struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
+ struct list_head vlans;
+ struct mutex fdb_lock;
};
+/**
+ * struct vsc73xx_ops - VSC73xx methods container
+ * @read: Method for register reading over the hardware-dependent interface
+ * @write: Method for register writing over the hardware-dependent interface
+ */
struct vsc73xx_ops {
int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val);
@@ -24,7 +73,22 @@ struct vsc73xx_ops {
u32 val);
};
+/**
+ * struct vsc73xx_bridge_vlan - VSC73xx driver structure which keeps a copy of
+ * the VLAN database
+ * @vid: VLAN number
+ * @portmask: each bit represents one port
+ * @untagged: each bit represents one port configured with @vid untagged
+ * @list: list structure
+ */
+struct vsc73xx_bridge_vlan {
+ u16 vid;
+ u8 portmask;
+ u8 untagged;
+ struct list_head list;
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
-int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_remove(struct vsc73xx *vsc);
void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 910fcb3b252b..0a05f4156ef4 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -5,8 +5,9 @@
*/
#include <net/dsa.h>
+#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
@@ -90,10 +91,8 @@ static void xrs700x_get_strings(struct dsa_switch *ds, int port,
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
- strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++)
+ ethtool_puts(&data, xrs700x_mibs[i].name);
}
static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -108,6 +107,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -137,9 +137,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
@@ -441,43 +441,48 @@ static void xrs700x_teardown(struct dsa_switch *ds)
cancel_delayed_work_sync(&priv->mib_work);
}
-static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
+
case 1:
case 2:
case 3:
- phylink_set(mask, 1000baseT_Full);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
+
default:
- linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
+ break;
}
+}
- phylink_set_port_modes(mask);
-
- /* The switch only supports full duplex. */
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
+static void xrs700x_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static void xrs700x_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
}
-static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void xrs700x_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct xrs700x *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct xrs700x *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
switch (speed) {
@@ -501,7 +506,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
- struct net_device *bridge, bool join)
+ struct dsa_bridge bridge, bool join)
{
unsigned int i, cpu_mask = 0, mask = 0;
struct xrs700x *priv = ds->priv;
@@ -513,14 +518,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
cpu_mask |= BIT(i);
- if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
mask |= BIT(i);
}
for (i = 0; i < ds->num_ports; i++) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* 1 = Disable forwarding to the port */
@@ -540,25 +545,28 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
xrs700x_bridge_common(ds, port, bridge, false);
}
static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
- struct net_device *hsr)
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
{
unsigned int val = XRS_HSR_CFG_HSR_PRP;
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
- struct net_device *slave;
+ struct net_device *user;
int ret, i, hsr_pair[2];
+ enum hsr_port_type type;
enum hsr_version ver;
bool fwd = false;
@@ -566,16 +574,31 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
if (ret)
return ret;
- /* Only ports 1 and 2 can be HSR/PRP redundant ports. */
- if (port != 1 && port != 2)
+ if (port != 1 && port != 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ports 1 and 2 can offload HSR/PRP");
return -EOPNOTSUPP;
+ }
- if (ver == HSR_V1)
+ if (ver == HSR_V1) {
val |= XRS_HSR_CFG_HSR;
- else if (ver == PRP_V1)
+ } else if (ver == PRP_V1) {
val |= XRS_HSR_CFG_PRP;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR v1 and PRP v1 can be offloaded");
return -EOPNOTSUPP;
+ }
+
+ ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
+ if (ret)
+ return ret;
+
+ if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR slave ports can be offloaded");
+ return -EOPNOTSUPP;
+ }
dsa_hsr_foreach_port(dp, ds, hsr) {
if (dp->index != port) {
@@ -636,8 +659,8 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
- slave = dsa_to_port(ds, hsr_pair[i])->slave;
- slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
+ user = dsa_to_port(ds, hsr_pair[i])->user;
+ user->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
@@ -648,7 +671,7 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
{
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
- struct net_device *slave;
+ struct net_device *user;
int i, hsr_pair[2];
unsigned int val;
@@ -690,20 +713,25 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
- slave = dsa_to_port(ds, hsr_pair[i])->slave;
- slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
+ user = dsa_to_port(ds, hsr_pair[i])->user;
+ user->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
}
+static const struct phylink_mac_ops xrs700x_phylink_mac_ops = {
+ .mac_config = xrs700x_mac_config,
+ .mac_link_down = xrs700x_mac_link_down,
+ .mac_link_up = xrs700x_mac_link_up,
+};
+
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
- .phylink_validate = xrs700x_phylink_validate,
- .phylink_mac_link_up = xrs700x_mac_link_up,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
.get_ethtool_stats = xrs700x_get_ethtool_stats,
@@ -761,6 +789,7 @@ struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
ds->ops = &xrs700x_ops;
+ ds->phylink_mac_ops = &xrs700x_phylink_mac_ops;
ds->priv = priv;
priv->dev = base;
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..9b731dea78c1 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -76,8 +76,7 @@ static const struct regmap_config xrs700x_i2c_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG
};
-static int xrs700x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int xrs700x_i2c_probe(struct i2c_client *i2c)
{
struct xrs700x *priv;
int ret;
@@ -105,18 +104,14 @@ static int xrs700x_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int xrs700x_i2c_remove(struct i2c_client *i2c)
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
- return 0;
+ return;
xrs700x_switch_remove(priv);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
@@ -132,8 +127,8 @@ static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id xrs700x_i2c_id[] = {
- { "xrs700x-switch", 0 },
- {},
+ { "xrs700x-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
@@ -152,7 +147,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
.name = "xrs700x-i2c",
.of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
},
- .probe = xrs700x_i2c_probe,
+ .probe = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
.shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
new file mode 100644
index 000000000000..1c511f5dc6ab
--- /dev/null
+++ b/drivers/net/dsa/yt921x.c
@@ -0,0 +1,3006 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Motorcomm YT921x Switch
+ *
+ * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII,
+ * be sure to do your own checks before porting to another chip.
+ *
+ * Copyright (c) 2025 David Yang
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include <net/dsa.h>
+
+#include "yt921x.h"
+
+struct yt921x_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) \
+ {_size, _offset, _name}
+
+/* Must agree with yt921x_mib
+ *
+ * Unstructured fields (name != NULL) will appear in get_ethtool_stats();
+ * structured ones are reported via their *_stats() methods, but we still
+ * need their sizes and offsets to perform 32-bit MIB overflow wraparound.
+ */
+static const struct yt921x_mib_desc yt921x_mib_descs[] = {
+ MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL),
+ MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL),
+
+ MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL),
+
+ MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL),
+ MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"),
+ MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"),
+};
+
+struct yt921x_info {
+ const char *name;
+ u16 major;
+	/* Unknown; seems to be a plain enumeration */
+	u8 mode;
+	u8 extmode;
+	/* Ports with integrated GbE PHYs, not including MCU Port 10 */
+ u16 internal_mask;
+ /* TODO: see comments in yt921x_dsa_phylink_get_caps() */
+ u16 external_mask;
+};
+
+#define YT921X_PORT_MASK_INTn(port) BIT(port)
+#define YT921X_PORT_MASK_INT0_n(n) GENMASK((n) - 1, 0)
+#define YT921X_PORT_MASK_EXT0 BIT(8)
+#define YT921X_PORT_MASK_EXT1 BIT(9)
+
+static const struct yt921x_info yt921x_infos[] = {
+ {
+ "YT9215SC", YT9215_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215S", YT9215_MAJOR, 2, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9215RB", YT9215_MAJOR, 3, 0,
+ YT921X_PORT_MASK_INT0_n(5),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9214NB", YT9215_MAJOR, 3, 2,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9213NB", YT9215_MAJOR, 3, 3,
+ YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3),
+ YT921X_PORT_MASK_EXT1,
+ },
+ {
+ "YT9218N", YT9218_MAJOR, 0, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ 0,
+ },
+ {
+ "YT9218MB", YT9218_MAJOR, 1, 0,
+ YT921X_PORT_MASK_INT0_n(8),
+ YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1,
+ },
+ {}
+};
+
+#define YT921X_NAME "yt921x"
+
+#define YT921X_VID_UNWARE 4095
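+/* VID used as the PVID of VLAN-unaware ports */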
+
+#define YT921X_POLL_SLEEP_US 10000
+#define YT921X_POLL_TIMEOUT_US 100000
+
+/* The polling interval must be small enough to avoid overflow of the 32-bit
+ * MIBs.
+ *
+ * Until MIBs can be read directly from the stats64 call (i.e. sleep there),
+ * stats have to be polled more frequently than is actually needed; for
+ * overflow protection alone, a 100 sec interval would normally be enough.
+ */
+#define YT921X_STATS_INTERVAL_JIFFIES (3 * HZ)
+
+struct yt921x_reg_mdio {
+ struct mii_bus *bus;
+ int addr;
+ /* SWITCH_ID_1 / SWITCH_ID_0 of the device
+ *
+ * This is a way to multiplex multiple devices on the same MII phyaddr
+ * and should be configurable in DT. However, MDIO core simply doesn't
+ * allow multiple devices over one reg addr, so this is a fixed value
+ * for now until a solution is found.
+ *
+ * Keep this because we need switchid to form MII regaddrs anyway.
+ */
+ unsigned char switchid;
+};
+
+/* TODO: SPI/I2C */
+
+#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds)
+#define to_device(priv) ((priv)->ds.dev)
+
+static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->read(priv->reg_ctx, reg, valp);
+}
+
+static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val)
+{
+ WARN_ON(!mutex_is_locked(&priv->reg_lock));
+
+ return priv->reg_ops->write(priv->reg_ctx, reg, val);
+}
+
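+/* Poll @reg until (value & @mask) == *@valp, the read fails, or the timeout
+ * expires. On entry, *@valp holds the expected masked value; on success it
+ * is replaced with the full register value last read.
+ */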
+static int
+yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp)
+{
+ u32 val;
+ int res;
+ int ret;
+
+ ret = read_poll_timeout(yt921x_reg_read, res,
+ res || (val & mask) == *valp,
+ YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US,
+ false, priv, reg, &val);
+ if (ret)
+ return ret;
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
+static int
+yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val)
+{
+ int res;
+ u32 v;
+ u32 u;
+
+ res = yt921x_reg_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg_write(priv, reg, u);
+}
+
+static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, 0, mask);
+}
+
+static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, 0);
+}
+
+static int
+yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set)
+{
+ return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask);
+}
+
+/* Some registers, like VLANn_CTRL, must always be written as a whole 64-bit
+ * value, even when only the lower or upper 32 bits are to be changed.
+ *
+ * There is no such restriction for reading, but 64-bit read wrappers are
+ * provided anyway so that u64 values are always handled consistently.
+ */
+
+static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp)
+{
+ u32 lo;
+ u32 hi;
+ int res;
+
+ res = yt921x_reg_read(priv, reg, &lo);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, reg + 4, &hi);
+ if (res)
+ return res;
+
+ *valp = ((u64)hi << 32) | lo;
+ return 0;
+}
+
+static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val)
+{
+ int res;
+
+ res = yt921x_reg_write(priv, reg, (u32)val);
+ if (res)
+ return res;
+ return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32));
+}
+
+static int
+yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val)
+{
+ int res;
+ u64 v;
+ u64 u;
+
+ res = yt921x_reg64_read(priv, reg, &v);
+ if (res)
+ return res;
+
+ u = v;
+ u &= ~mask;
+ u |= val;
+ if (u == v)
+ return 0;
+
+ return yt921x_reg64_write(priv, reg, u);
+}
+
+static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask)
+{
+ return yt921x_reg64_update_bits(priv, reg, mask, 0);
+}
+
+static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ u32 val;
+ int res;
+
+	/* A 32-bit access takes four 16-bit SMI transactions; hold the mdio
+	 * bus lock across all of them to avoid (un)locking 4 times
+	 */
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_READ;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_READ;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (u16)res;
+ res = __mdiobus_read(bus, addr, reg_data);
+ if (res < 0)
+ goto end;
+ val = (val << 16) | (u16)res;
+
+ *valp = val;
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static int yt921x_reg_mdio_write(void *context, u32 reg, u32 val)
+{
+ struct yt921x_reg_mdio *mdio = context;
+ struct mii_bus *bus = mdio->bus;
+ int addr = mdio->addr;
+ u32 reg_addr;
+ u32 reg_data;
+ int res;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_addr, (u16)reg);
+ if (res)
+ goto end;
+
+ reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA |
+ YT921X_SMI_WRITE;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16));
+ if (res)
+ goto end;
+ res = __mdiobus_write(bus, addr, reg_data, (u16)val);
+ if (res)
+ goto end;
+
+ res = 0;
+
+end:
+ mutex_unlock(&bus->mdio_lock);
+ return res;
+}
+
+static const struct yt921x_reg_ops yt921x_reg_ops_mdio = {
+ .read = yt921x_reg_mdio_read,
+ .write = yt921x_reg_mdio_write,
+};
+
+/* TODO: SPI/I2C */
+
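+/* Access to the internal PHYs is indirect: program YT921X_INT_MBUS_CTRL with
+ * the port, register and operation, kick YT921X_INT_MBUS_OP and poll until
+ * the start bit clears, then fetch the result from YT921X_INT_MBUS_DIN.
+ */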
+static int yt921x_intif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_intif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_intif_wait(priv);
+}
+
+static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
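+	/* No such PHY: read back all ones, like an idle MDIO bus */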
+ if (port >= YT921X_PORT_NUM)
+ return U16_MAX;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ if (port >= YT921X_PORT_NUM)
+ return -ENODEV;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_intif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x internal MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ mbus->priv = priv;
+ mbus->read = yt921x_mbus_int_read;
+ mbus->write = yt921x_mbus_int_write;
+ mbus->parent = dev;
+ mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0);
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_int = mbus;
+
+ return 0;
+}
+
+static int yt921x_extif_wait(struct yt921x_priv *priv)
+{
+ u32 val = 0;
+
+ return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START,
+ &val);
+}
+
+static int
+yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val);
+ if (res)
+ return res;
+
+ if ((u16)val != val)
+ dev_info(dev,
+ "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n",
+ __func__, port, reg, val);
+ *valp = (u16)val;
+ return 0;
+}
+
+static int
+yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_extif_wait(priv);
+ if (res)
+ return res;
+
+ mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M |
+ YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M;
+ ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) |
+ YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE;
+ res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START);
+ if (res)
+ return res;
+
+ return yt921x_extif_wait(priv);
+}
+
+static int yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ u16 val;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_read(priv, port, reg, &val);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+ return val;
+}
+
+static int
+yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data)
+{
+ struct yt921x_priv *priv = mbus->priv;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_extif_write(priv, port, reg, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp)
+{
+ struct device *dev = to_device(priv);
+ struct mii_bus *mbus;
+ int res;
+
+ mbus = devm_mdiobus_alloc(dev);
+ if (!mbus)
+ return -ENOMEM;
+
+ mbus->name = "YT921x external MDIO bus";
+ snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev));
+ mbus->priv = priv;
+ /* TODO: c45? */
+ mbus->read = yt921x_mbus_ext_read;
+ mbus->write = yt921x_mbus_ext_write;
+ mbus->parent = dev;
+
+ res = devm_of_mdiobus_register(dev, mbus, mnp);
+ if (res)
+ return res;
+
+ priv->mbus_ext = mbus;
+
+ return 0;
+}
+
+/* Read and handle overflow of the 32-bit MIBs. The MIB buffer must be zeroed
+ * beforehand.
+ */
+static int yt921x_read_mib(struct yt921x_priv *priv, int port)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ struct device *dev = to_device(priv);
+ struct yt921x_mib *mib = &pp->mib;
+ int res = 0;
+
+	/* Reading of yt921x_port::mib is not protected by a lock; trying to
+	 * keep it consistent would be futile anyway, since registers must be
+	 * read one by one and there is no way to take a snapshot of the MIB
+	 * stats.
+	 *
+	 * Writing (done by this function only) is and must be protected by
+	 * reg_lock.
+	 */
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+ u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
+ u64 *valp = &((u64 *)mib)[i];
+ u64 val = *valp;
+ u32 val0;
+ u32 val1;
+
+ res = yt921x_reg_read(priv, reg, &val0);
+ if (res)
+ break;
+
+		if (desc->size <= 1) {
+			/* Carry into the high half when the 32-bit counter
+			 * wrapped since the last read
+			 */
+			if ((u32)val > val0)
+				val += (u64)U32_MAX + 1;
+			val &= ~(u64)U32_MAX;
+			val |= val0;
+ } else {
+ res = yt921x_reg_read(priv, reg + 4, &val1);
+ if (res)
+ break;
+ val = ((u64)val1 << 32) | val0;
+ }
+
+ WRITE_ONCE(*valp, val);
+ }
+
+ pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte +
+ mib->rx_128_255byte + mib->rx_256_511byte +
+ mib->rx_512_1023byte + mib->rx_1024_1518byte +
+ mib->rx_jumbo;
+ pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte +
+ mib->tx_128_255byte + mib->tx_256_511byte +
+ mib->tx_512_1023byte + mib->tx_1024_1518byte +
+ mib->tx_jumbo;
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "read stats for",
+ port, res);
+ return res;
+}
+
+static void yt921x_poll_mib(struct work_struct *work)
+{
+ struct yt921x_port *pp = container_of_const(work, struct yt921x_port,
+ mib_read.work);
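+	/* pp - pp->index points at ports[0]; stepping back over the offset
+	 * of ports[] recovers the containing yt921x_priv.
+	 */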
+ struct yt921x_priv *priv = (void *)(pp - pp->index) -
+ offsetof(struct yt921x_priv, ports);
+ unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES;
+ int port = pp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+ if (res)
+ delay *= 4;
+
+ schedule_delayed_work(&pp->mib_read, delay);
+}
+
+static void
+yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ ethtool_puts(&data, desc->name);
+ }
+}
+
+static void
+yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+ size_t j;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ j = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (!desc->name)
+ continue;
+
+ data[j] = ((u64 *)mib)[i];
+ j++;
+ }
+}
+
+static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int cnt = 0;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) {
+ const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
+
+ if (desc->name)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ mac_stats->FramesTransmittedOK = pp->tx_frames;
+ mac_stats->SingleCollisionFrames = mib->tx_single_collisions;
+ mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions;
+ mac_stats->FramesReceivedOK = pp->rx_frames;
+ mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors;
+ mac_stats->AlignmentErrors = mib->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = mib->tx_good_bytes;
+ mac_stats->FramesWithDeferredXmissions = mib->tx_deferred;
+ mac_stats->LateCollisions = mib->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors;
+ /* mac_stats->FramesLostDueToIntMACXmitError */
+ /* mac_stats->CarrierSenseErrors */
+ mac_stats->OctetsReceivedOK = mib->rx_good_bytes;
+ /* mac_stats->FramesLostDueToIntMACRcvError */
+ mac_stats->MulticastFramesXmittedOK = mib->tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = mib->tx_broadcast;
+ /* mac_stats->FramesWithExcessiveDeferral */
+ mac_stats->MulticastFramesReceivedOK = mib->rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast;
+ /* mac_stats->InRangeLengthErrors */
+ /* mac_stats->OutOfRangeLengthField */
+ mac_stats->FrameTooLongErrors = mib->rx_oversize_errors;
+}
+
+static void
+yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ ctrl_stats->MACControlFramesTransmitted = mib->tx_pause;
+ ctrl_stats->MACControlFramesReceived = mib->rx_pause;
+ /* ctrl_stats->UnsupportedOpcodesReceived */
+}
+
+static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, YT921X_FRAME_SIZE_MAX },
+ {}
+};
+
+static void
+yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ *ranges = yt921x_rmon_ranges;
+
+ rmon_stats->undersize_pkts = mib->rx_undersize_errors;
+ rmon_stats->oversize_pkts = mib->rx_oversize_errors;
+ rmon_stats->fragments = mib->rx_alignment_errors;
+ /* rmon_stats->jabbers */
+
+ rmon_stats->hist[0] = mib->rx_64byte;
+ rmon_stats->hist[1] = mib->rx_65_127byte;
+ rmon_stats->hist[2] = mib->rx_128_255byte;
+ rmon_stats->hist[3] = mib->rx_256_511byte;
+ rmon_stats->hist[4] = mib->rx_512_1023byte;
+ rmon_stats->hist[5] = mib->rx_1024_1518byte;
+ rmon_stats->hist[6] = mib->rx_jumbo;
+
+ rmon_stats->hist_tx[0] = mib->tx_64byte;
+ rmon_stats->hist_tx[1] = mib->tx_65_127byte;
+ rmon_stats->hist_tx[2] = mib->tx_128_255byte;
+ rmon_stats->hist_tx[3] = mib->tx_256_511byte;
+ rmon_stats->hist_tx[4] = mib->tx_512_1023byte;
+ rmon_stats->hist_tx[5] = mib->tx_1024_1518byte;
+ rmon_stats->hist_tx[6] = mib->tx_jumbo;
+}
+
+static void
+yt921x_dsa_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
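+	/* Sleeping is not allowed here, so report the counters gathered by
+	 * the periodic MIB poll instead of reading the hardware.
+	 */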
+ stats->rx_length_errors = mib->rx_undersize_errors +
+ mib->rx_fragment_errors;
+ stats->rx_over_errors = mib->rx_oversize_errors;
+ stats->rx_crc_errors = mib->rx_crc_errors;
+ stats->rx_frame_errors = mib->rx_alignment_errors;
+ /* stats->rx_fifo_errors */
+ /* stats->rx_missed_errors */
+
+ stats->tx_aborted_errors = mib->tx_aborted_errors;
+ /* stats->tx_carrier_errors */
+ stats->tx_fifo_errors = mib->tx_undersize_errors;
+ /* stats->tx_heartbeat_errors */
+ stats->tx_window_errors = mib->tx_late_collisions;
+
+ stats->rx_packets = pp->rx_frames;
+ stats->tx_packets = pp->tx_frames;
+ stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets;
+ stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ stats->rx_dropped = mib->rx_dropped;
+ /* stats->tx_dropped */
+ stats->multicast = mib->rx_multicast;
+ stats->collisions = mib->tx_collisions;
+}
+
+static void
+yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct yt921x_port *pp = &priv->ports[port];
+ struct yt921x_mib *mib = &pp->mib;
+
+ mutex_lock(&priv->reg_lock);
+ yt921x_read_mib(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ pause_stats->tx_pause_frames = mib->tx_pause;
+ pause_stats->rx_pause_frames = mib->rx_pause;
+}
+
+static int
+yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e)
+{
+ /* Poor datasheet for EEE operations; don't ask if you are confused */
+
+ bool enable = e->eee_enabled;
+ u16 new_mask;
+ int res;
+
+ /* Enable / disable global EEE */
+ new_mask = priv->eee_ports_mask;
+ new_mask &= ~BIT(port);
+ new_mask |= !enable ? 0 : BIT(port);
+
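+	/* The global strap only needs to change when the first port enables
+	 * EEE or the last port disables it.
+	 */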
+ if (!!new_mask != !!priv->eee_ports_mask) {
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL,
+ YT921X_PON_STRAP_EEE, !!new_mask);
+ if (res)
+ return res;
+ }
+
+ priv->eee_ports_mask = new_mask;
+
+ /* Enable / disable port EEE */
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL,
+ YT921X_EEE_CTRL_ENn(port), enable);
+ if (res)
+ return res;
+ res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port),
+ YT921X_EEE_VAL_DATA, enable);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_set_eee(priv, port, e);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+	/* This only serves as a packet filter, since the frame size is always
+	 * set to the maximum after reset
+	 */
+
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ int frame_size;
+ int res;
+
+ frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ if (dsa_port_is_cpu(dp))
+ frame_size += YT921X_TAG_LEN;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port),
+ YT921X_MAC_FRAME_SIZE_M,
+ YT921X_MAC_FRAME_SIZE(frame_size));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* Only called for user ports, exclude tag len here */
+ return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN;
+}
+
+static int
+yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress)
+{
+ u32 mask;
+
+ if (ingress)
+ mask = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ mask = YT921X_MIRROR_EGR_PORTn(port);
+ return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask);
+}
+
+static int
+yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress,
+ int to_local_port, struct netlink_ext_ack *extack)
+{
+ u32 srcs;
+ u32 ctrl;
+ u32 val;
+ u32 dst;
+ int res;
+
+ if (ingress)
+ srcs = YT921X_MIRROR_IGR_PORTn(port);
+ else
+ srcs = YT921X_MIRROR_EGR_PORTn(port);
+ dst = YT921X_MIRROR_PORT(to_local_port);
+
+ res = yt921x_reg_read(priv, YT921X_MIRROR, &val);
+ if (res)
+ return res;
+
+ /* other mirror tasks & different dst port -> conflict */
+ if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M |
+ YT921X_MIRROR_IGR_PORTS_M)) &&
+ (val & YT921X_MIRROR_PORT_M) != dst) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+
+ ctrl = val & ~YT921X_MIRROR_PORT_M;
+ ctrl |= srcs;
+ ctrl |= dst;
+
+ if (ctrl == val)
+ return 0;
+
+ return yt921x_reg_write(priv, YT921X_MIRROR, ctrl);
+}
+
+static void
+yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_del(priv, port, mirror->ingress);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unmirror",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_mirror_add(priv, port, ingress,
+ mirror->to_local_port, extack);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ struct device *dev = to_device(priv);
+ u32 val = YT921X_FDB_RESULT_DONE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE,
+ &val);
+ if (res) {
+ dev_err(dev, "FDB probably stuck\n");
+ return res;
+ }
+
+ *valp = val;
+ return 0;
+}
+
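+/* FDB operations follow one pattern: load the key into FDB_IN0/FDB_IN1
+ * (MAC + FID) and FDB_IN2 (port mask), kick FDB_OP with OP_START, poll
+ * FDB_RESULT for completion, then read the outcome from FDB_OUT0..2.
+ */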
+static int
+yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u32 ctrl1)
+{
+ u32 ctrl;
+ int res;
+
+ ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl);
+ if (res)
+ return res;
+
+ ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5];
+ return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+}
+
+static int
+yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 *indexp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_in01(priv, addr, vid, 0);
+ if (res)
+ return res;
+
+ ctrl = 0;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *indexp = YT921X_FDB_NUM;
+ return 0;
+ }
+
+ *indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+ return 0;
+}
+
+static int
+yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp,
+ u16 *ports_maskp, u16 *indexp, u8 *statusp)
+{
+ struct device *dev = to_device(priv);
+ u16 index;
+ u32 data0;
+ u32 data1;
+ u32 data2;
+ u32 val;
+ int res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+ if (val & YT921X_FDB_RESULT_NOTFOUND) {
+ *ports_maskp = 0;
+ return 0;
+ }
+ index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val);
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1);
+ if (res)
+ return res;
+ if ((data1 & YT921X_FDB_IO1_STATUS_M) ==
+ YT921X_FDB_IO1_STATUS_INVALID) {
+ *ports_maskp = 0;
+ return 0;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2);
+ if (res)
+ return res;
+
+ addr[0] = data0 >> 24;
+ addr[1] = data0 >> 16;
+ addr[2] = data0 >> 8;
+ addr[3] = data0;
+ addr[4] = data1 >> 8;
+ addr[5] = data1;
+ *vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1);
+ *indexp = index;
+ *ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2);
+ *statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1);
+
+ dev_dbg(dev,
+ "%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n",
+ __func__, *indexp, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], *vidp, *ports_maskp, *statusp);
+ return 0;
+}
+
+static int
+yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ unsigned char addr[ETH_ALEN];
+ u8 status;
+ u16 pmask;
+ u16 index;
+ u32 ctrl;
+ u16 vid;
+ int res;
+
+ ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status);
+ if (res)
+ return res;
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC, data);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ index = 0;
+ do {
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT |
+ YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index,
+ &status);
+ if (res)
+ return res;
+ if (!pmask)
+ break;
+
+ if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) {
+ res = cb(addr, vid,
+ status == YT921X_FDB_ENTRY_STATUS_STATIC,
+ data);
+ if (res)
+ return res;
+ }
+
+		/* Never call GET_NEXT with index 4095, otherwise the
+		 * operation hangs until a reset!
+		 */
+ } while (index < YT921X_FDB_NUM - 1);
+
+ return 0;
+}
+
+static int
+yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid,
+ bool flush_static)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ if (vid < 4096) {
+ ctrl = YT921X_FDB_IO1_FID(vid);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl);
+ if (res)
+ return res;
+ }
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START;
+ if (vid >= 4096)
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT;
+ else
+ ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID;
+ if (flush_static)
+ ctrl |= YT921X_FDB_OP_FLUSH_STATIC;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_fdb_wait(priv, &val);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static)
+{
+ return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static);
+}
+
+static int
+yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1,
+ u16 ctrl2)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX |
+ YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
+static int
+yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_FDB_IO1_STATUS_STATIC;
+ res = yt921x_fdb_in01(priv, addr, vid, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl);
+ if (res)
+ return res;
+
+ ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+}
+
+static int
+yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr,
+ u16 vid, u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 ctrl;
+ u32 val2;
+ u32 val;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return 0;
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl2 == val2)
+ return 0;
+ if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) {
+ ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START;
+ res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl);
+ if (res)
+ return res;
+
+ return yt921x_fdb_wait(priv, &val);
+ }
+
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1);
+ if (res)
+ return res;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
+static int
+yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid,
+ u16 ports_mask)
+{
+ u16 index;
+ u32 ctrl1;
+ u32 ctrl2;
+ u32 val1;
+ u32 val2;
+ int res;
+
+ /* Check for presence */
+ res = yt921x_fdb_has(priv, addr, vid, &index);
+ if (res)
+ return res;
+ if (index >= YT921X_FDB_NUM)
+ return yt921x_fdb_add(priv, addr, vid, ports_mask);
+
+ /* Check if action required */
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &val1);
+ if (res)
+ return res;
+ res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2);
+ if (res)
+ return res;
+
+ ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M;
+ ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC;
+ ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask);
+ if (ctrl1 == val1 && ctrl2 == val2)
+ return 0;
+
+ return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2);
+}
+
+static int
+yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+	/* The hardware FDB is shared between fdb and mdb entries, and
+	 * "bridge fdb show" only wants to see unicast
+	 */
+ res = yt921x_fdb_dump(priv, BIT(port), cb, data);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_flush_port(priv, port, false);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for",
+ port, res);
+}
+
+static int
+yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 ctrl;
+ int res;
+
+	/* The AGEING reg is in units of 5 seconds */
+ ctrl = clamp(msecs / 5000, 1, U16_MAX);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_write(priv, YT921X_AGEING, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_leave(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const unsigned char *addr = mdb->addr;
+ u16 vid = mdb->vid;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_fdb_join(priv, addr, vid, BIT(port));
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u32 mask;
+ u32 ctrl;
+
+ mask = YT921X_PORT_VLAN_CTRL_CVID_M;
+ ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid);
+ return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port),
+ mask, ctrl);
+}
+
+static int
+yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ struct net_device *bdev;
+ u16 pvid;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+
+ if (!bdev || !vlan_filtering)
+ pvid = YT921X_VID_UNWARE;
+ else
+ br_vlan_get_pvid(bdev, &pvid);
+ res = yt921x_port_set_pvid(priv, port, pvid);
+ if (res)
+ return res;
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED |
+ YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ ctrl = 0;
+ /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */
+ if (vlan_filtering && !pvid)
+ ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER,
+ YT921X_VLAN_IGR_FILTER_PORTn(port),
+ vlan_filtering);
+ if (res)
+ return res;
+
+ /* Turn on / off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ if (!vlan_filtering)
+ ctrl = 0;
+ else
+ ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0);
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid)
+{
+ u64 mask64;
+
+ mask64 = YT921X_VLAN_CTRL_PORTS(port) |
+ YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64);
+}
+
+static int
+yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged)
+{
+ u64 mask64;
+ u64 ctrl64;
+
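+	/* The CPU ports are made members of every VLAN we program */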
+ mask64 = YT921X_VLAN_CTRL_PORTn(port) |
+ YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask);
+ ctrl64 = mask64;
+
+ mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+ if (untagged)
+ ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port);
+
+ return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid),
+ mask64, ctrl64);
+}
+
+static int
+yt921x_pvid_clear(struct yt921x_priv *priv, int port)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ res = yt921x_port_set_pvid(priv, port,
+ vlan_filtering ? 0 : YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ if (vlan_filtering) {
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid)
+{
+ struct dsa_port *dp = dsa_to_port(&priv->ds, port);
+ bool vlan_filtering;
+ u32 mask;
+ int res;
+
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ if (vlan_filtering) {
+ res = yt921x_port_set_pvid(priv, port, vid);
+ if (res)
+ return res;
+ }
+
+ mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_vlan_filtering(priv, port, vlan_filtering);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_del(priv, port, vid);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 vid = vlan->vid;
+ u16 pvid;
+ int res;
+
+ /* CPU port is supposed to be a member of every VLAN; see
+ * yt921x_vlan_add() and yt921x_port_setup()
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ res = yt921x_vlan_add(priv, port, vid,
+ vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (res)
+ break;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ res = yt921x_pvid_set(priv, port, vid);
+ } else {
+ br_vlan_get_pvid(bdev, &pvid);
+ if (pvid == vid)
+ res = yt921x_pvid_clear(priv, port);
+ }
+ }
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_userport_standalone(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
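+	/* Block forwarding to every port except the CPU ports */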
+ ctrl = ~priv->cpu_ports_mask;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl);
+ if (res)
+ return res;
+
+ /* Turn off FDB learning to prevent FDB pollution */
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ /* Turn off VLAN awareness */
+ mask = YT921X_PORT_IGR_TPIDn_CTAG_M;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask);
+ if (res)
+ return res;
+
+	/* Irrelevant since learning is off and all packets are trapped;
+	 * set it anyway
+	 */
+ res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_userport_bridge(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_isolate(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = BIT(port);
+ for (int i = 0; i < YT921X_PORT_NUM; i++) {
+ if ((BIT(i) & priv->cpu_ports_mask) || i == port)
+ continue;
+
+ res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i),
+ mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+/* Make sure to include the CPU port in ports_mask, or the bridge will not
+ * include it.
+ */
+static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask)
+{
+ unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask;
+ u32 isolated_mask;
+ u32 ctrl;
+ int port;
+ int res;
+
+ isolated_mask = 0;
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ if (pp->isolated)
+ isolated_mask |= BIT(port);
+ }
+
+ /* Block from non-cpu bridge ports ... */
+ for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) {
+ struct yt921x_port *pp = &priv->ports[port];
+
+ /* to non-bridge ports */
+ ctrl = ~ports_mask;
+ /* to isolated ports when isolated */
+ if (pp->isolated)
+ ctrl |= isolated_mask;
+ /* to itself when non-hairpin */
+ if (!pp->hairpin)
+ ctrl |= BIT(port);
+ else
+ ctrl &= ~BIT(port);
+
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int yt921x_bridge_leave(struct yt921x_priv *priv, int port)
+{
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_isolate(priv, port);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int
+yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask)
+{
+ int res;
+
+ res = yt921x_userport_bridge(priv, port);
+ if (res)
+ return res;
+
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+
+ return 0;
+}
+
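+/* Mask of user ports offloading the given bridge device */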
+static u32
+dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev)
+{
+ struct dsa_port *dp;
+ u32 mask = 0;
+
+ dsa_switch_for_each_user_port(dp, ds)
+ if (dsa_port_offloads_bridge_dev(dp, bdev))
+ mask |= BIT(dp->index);
+
+ return mask;
+}
+
+static int
+yt921x_bridge_flags(struct yt921x_priv *priv, int port,
+ struct switchdev_brport_flags flags)
+{
+ struct yt921x_port *pp = &priv->ports[port];
+ bool do_flush;
+ u32 mask;
+ int res;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learning = flags.val & BR_LEARNING;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, !learning);
+ if (res)
+ return res;
+ }
+
+ /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP
+ * is set
+ */
+
+	/* BR_BCAST_FLOOD: we can filter bcast, but cannot trap it */
+
+ do_flush = false;
+ if (flags.mask & BR_HAIRPIN_MODE) {
+ pp->hairpin = flags.val & BR_HAIRPIN_MODE;
+ do_flush = true;
+ }
+ if (flags.mask & BR_ISOLATED) {
+ pp->isolated = flags.val & BR_ISOLATED;
+ do_flush = true;
+ }
+ if (do_flush) {
+ struct dsa_switch *ds = &priv->ds;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bdev;
+
+ bdev = dsa_port_bridge_dev_get(dp);
+ if (bdev) {
+ u32 ports_mask;
+
+ ports_mask = dsa_bridge_ports(ds, bdev);
+ ports_mask |= priv->cpu_ports_mask;
+ res = yt921x_bridge(priv, ports_mask);
+ if (res)
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static int
+yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD |
+ BR_MCAST_FLOOD | BR_ISOLATED))
+ return -EINVAL;
+ return 0;
+}
+
+static int
+yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_flags(priv, port, flags);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_leave(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "unbridge",
+ port, res);
+}
+
+static int
+yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u16 ports_mask;
+ int res;
+
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ ports_mask = dsa_bridge_ports(ds, bridge.dev);
+ ports_mask |= priv->cpu_ports_mask;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_bridge_join(priv, port, ports_mask);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ mask = YT921X_STP_PORTn_M(port);
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int
+yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ u64 mask64;
+ u64 ctrl64;
+ int res;
+
+ if (!msti->vid)
+ return -EINVAL;
+ if (!msti->msti || msti->msti >= YT921X_MSTI_NUM)
+ return -EINVAL;
+
+ mask64 = YT921X_VLAN_CTRL_STP_ID_M;
+ ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(msti->vid),
+ mask64, ctrl64);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static void
+yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct device *dev = to_device(priv);
+ bool learning;
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ mask = YT921X_STP_PORTn_M(port);
+ learning = false;
+ switch (state) {
+ case BR_STATE_DISABLED:
+ ctrl = YT921X_STP_PORTn_DISABLED(port);
+ break;
+ case BR_STATE_LISTENING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ break;
+ case BR_STATE_LEARNING:
+ ctrl = YT921X_STP_PORTn_LEARNING(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ ctrl = YT921X_STP_PORTn_FORWARD(port);
+ learning = dp->learning;
+ break;
+ case BR_STATE_BLOCKING:
+ ctrl = YT921X_STP_PORTn_BLOCKING(port);
+ break;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ do {
+ res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl);
+ if (res)
+ break;
+
+ mask = YT921X_PORT_LEARN_DIS;
+ ctrl = !learning ? YT921X_PORT_LEARN_DIS : 0;
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port),
+ mask, ctrl);
+ } while (0);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for",
+ port, res);
+}
+
+static int yt921x_port_down(struct yt921x_priv *priv, int port)
+{
+ u32 mask;
+ int res;
+
+ mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
+ mask = YT921X_SERDES_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_PORT_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_PORT_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_PORT_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_PORT_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_PORT_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_PORT_DUPLEX_FULL;
+ if (tx_pause)
+ ctrl |= YT921X_PORT_TX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_PORT_RX_PAUSE;
+ ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN;
+ res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl);
+ if (res)
+ return res;
+
+ if (yt921x_port_is_external(port)) {
+ mask = YT921X_SERDES_SPEED_M;
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_SERDES_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_SERDES_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_SERDES_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_SERDES_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_SERDES_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mask |= YT921X_SERDES_DUPLEX_FULL;
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_SERDES_DUPLEX_FULL;
+ mask |= YT921X_SERDES_TX_PAUSE;
+ if (tx_pause)
+ ctrl |= YT921X_SERDES_TX_PAUSE;
+ mask |= YT921X_SERDES_RX_PAUSE;
+ if (rx_pause)
+ ctrl |= YT921X_SERDES_RX_PAUSE;
+ mask |= YT921X_SERDES_LINK;
+ ctrl |= YT921X_SERDES_LINK;
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_LINK;
+ res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask);
+ if (res)
+ return res;
+
+ switch (speed) {
+ case SPEED_10:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10;
+ break;
+ case SPEED_100:
+ ctrl = YT921X_MDIO_POLLING_SPEED_100;
+ break;
+ case SPEED_1000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_1000;
+ break;
+ case SPEED_2500:
+ ctrl = YT921X_MDIO_POLLING_SPEED_2500;
+ break;
+ case SPEED_10000:
+ ctrl = YT921X_MDIO_POLLING_SPEED_10000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (duplex == DUPLEX_FULL)
+ ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL;
+ ctrl |= YT921X_MDIO_POLLING_LINK;
+ res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static int
+yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct device *dev = to_device(priv);
+ u32 mask;
+ u32 ctrl;
+ int res;
+
+ if (!yt921x_port_is_external(port)) {
+ if (interface != PHY_INTERFACE_MODE_INTERNAL) {
+ dev_err(dev, "Wrong mode %d on port %d\n",
+ interface, port);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ switch (interface) {
+ /* SERDES */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mask = YT921X_SERDES_CTRL_PORTn(port);
+ res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_XMII_CTRL_PORTn(port);
+ res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask);
+ if (res)
+ return res;
+
+ mask = YT921X_SERDES_MODE_M;
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ ctrl = YT921X_SERDES_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_100BASEX:
+ ctrl = YT921X_SERDES_MODE_100BASEX;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ctrl = YT921X_SERDES_MODE_1000BASEX;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ctrl = YT921X_SERDES_MODE_2500BASEX;
+ break;
+ default:
+ return -EINVAL;
+ }
+ res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port),
+ mask, ctrl);
+ if (res)
+ return res;
+
+ break;
+ /* add XMII support here */
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+	/* No need to sync; the port control block is held until the device
+	 * is removed
+	 */
+ cancel_delayed_work(&priv->ports[port].mib_read);
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_down(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down",
+ port, res);
+}
+
+static void
+yt921x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_up(priv, port, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up",
+ port, res);
+
+ schedule_delayed_work(&priv->ports[port].mib_read, 0);
+}
+
+static void
+yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct yt921x_priv *priv = to_yt921x_priv(dp->ds);
+ int port = dp->index;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_config(priv, port, mode, state->interface);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config",
+ port, res);
+}
+
+static void
+yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ const struct yt921x_info *info = priv->info;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+
+ if (info->internal_mask & BIT(port)) {
+		/* Port 10 for the MCU should probably go here too. But since
+		 * that is still untested, keep it disabled for the moment by
+		 * letting it fall through to the default branch.
+		 */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ } else if (info->external_mask & BIT(port)) {
+		/* TODO: external ports may support SERDES only, XMII only, or
+		 * SERDES + XMII depending on the chip. However, we can't build
+		 * an accurate configuration table due to the lack of
+		 * documentation, so we simply declare SERDES + XMII and rely
+		 * on the devicetree being correct for now.
+		 */
+
+ /* SERDES */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ /* REVSGMII (SGMII in PHY role) should go here, once
+ * PHY_INTERFACE_MODE_REVSGMII is introduced.
+ */
+ __set_bit(PHY_INTERFACE_MODE_100BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
+
+ /* XMII */
+
+ /* Not tested. To add support for XMII:
+ * - Add proper interface modes below
+ * - Handle them in yt921x_port_config()
+ */
+ }
+ /* no such port: empty supported_interfaces causes phylink to turn it
+ * down
+ */
+}
+
+static int yt921x_port_setup(struct yt921x_priv *priv, int port)
+{
+ struct dsa_switch *ds = &priv->ds;
+ u32 ctrl;
+ int res;
+
+ res = yt921x_userport_standalone(priv, port);
+ if (res)
+ return res;
+
+ if (dsa_is_cpu_port(ds, port)) {
+		/* Egress on the CPU port is supposed to be completely
+		 * controlled via tagging, so set it to one-way isolation
+		 * (drop all packets without a tag).
+		 */
+ ctrl = ~(u32)0;
+ res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port),
+ ctrl);
+ if (res)
+ return res;
+
+		/* To simplify the FDB "isolation" simulation, we also disable
+		 * learning on the CPU port and let software identify packets
+		 * headed for the CPU (either trapped, or matching a static FDB
+		 * entry, no matter which bridge that entry belongs to), which
+		 * is already done by yt921x_userport_standalone(). As a
+		 * result, VLAN-awareness becomes irrelevant on the CPU port
+		 * (it is set to VLAN-unaware anyway).
+		 */
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol m)
+{
+ return DSA_TAG_PROTO_YT921X;
+}
+
+static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_port_setup(priv, port);
+ mutex_unlock(&priv->reg_lock);
+
+ return res;
+}
+
+static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp)
+{
+ u32 val = YT921X_EDATA_DATA_IDLE;
+ int res;
+
+ res = yt921x_reg_wait(priv, YT921X_EDATA_DATA,
+ YT921X_EDATA_DATA_STATUS_M, &val);
+ if (res)
+ return res;
+
+ *valp = val;
+ return 0;
+}
+
+static int
+yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 ctrl;
+ u32 val;
+ int res;
+
+ ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ;
+ res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl);
+ if (res)
+ return res;
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+
+ *valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val);
+ return 0;
+}
+
+static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp)
+{
+ u32 val;
+ int res;
+
+ res = yt921x_edata_wait(priv, &val);
+ if (res)
+ return res;
+ return yt921x_edata_read_cont(priv, addr, valp);
+}
+
+static int yt921x_chip_detect(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ const struct yt921x_info *info;
+ u8 extmode;
+ u32 chipid;
+ u32 major;
+ u32 mode;
+ int res;
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid);
+ if (res)
+ return res;
+
+ major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid);
+
+ for (info = yt921x_infos; info->name; info++)
+ if (info->major == major)
+ break;
+ if (!info->name) {
+ dev_err(dev, "Unexpected chipid 0x%x\n", chipid);
+ return -ENODEV;
+ }
+
+ res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode);
+ if (res)
+ return res;
+ res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode);
+ if (res)
+ return res;
+
+ for (; info->name; info++)
+ if (info->major == major && info->mode == mode &&
+ info->extmode == extmode)
+ break;
+ if (!info->name) {
+ dev_err(dev,
+ "Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n",
+ chipid, mode, extmode);
+ return -ENODEV;
+ }
+
+	/* Print the full chipid here since we are interested in the lower 16 bits */
+ dev_info(dev,
+ "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n",
+ info->name, chipid, mode, extmode);
+
+ priv->info = info;
+ return 0;
+}
+
+static int yt921x_chip_reset(struct yt921x_priv *priv)
+{
+ struct device *dev = to_device(priv);
+ u16 eth_p_tag;
+ u32 val;
+ int res;
+
+ res = yt921x_chip_detect(priv);
+ if (res)
+ return res;
+
+ /* Reset */
+ res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW);
+ if (res)
+ return res;
+
+	/* RST_HW is almost the same as a GPIO hard reset, so we need this delay. */
+ fsleep(YT921X_RST_DELAY_US);
+
+ val = 0;
+ res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val);
+ if (res)
+ return res;
+
+	/* Check the tag EtherType; do it after reset in case it was messed up
+	 * before.
+	 */
+ res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val);
+ if (res)
+ return res;
+ eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val);
+ if (eth_p_tag != ETH_P_YT921X) {
+ dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag,
+ ETH_P_YT921X);
+		/* Although setting CPU_TAG_TPID is possible, we choose not to,
+		 * since there is no way it can differ unless this is the
+		 * wrong chip.
+		 */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int yt921x_chip_setup(struct yt921x_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ unsigned long cpu_ports_mask;
+ u64 ctrl64;
+ u32 ctrl;
+ int port;
+ int res;
+
+ /* Enable DSA */
+ priv->cpu_ports_mask = dsa_cpu_ports(ds);
+
+ ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN |
+ YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask));
+ res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl);
+ if (res)
+ return res;
+
+ /* Enable and clear MIB */
+ res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB);
+ if (res)
+ return res;
+
+ ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT;
+ res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl);
+ if (res)
+ return res;
+
+ /* Setup software switch */
+ ctrl = YT921X_CPU_COPY_TO_EXT_CPU;
+ res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl);
+ if (res)
+ return res;
+
+ ctrl = GENMASK(10, 0);
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+	/* YT921x does not support native DSA port bridging, so we use port
+	 * isolation to emulate it. However, be especially careful that port
+	 * isolation takes effect _after_ FDB lookups, i.e. if an FDB entry
+	 * (from another bridge) is matched and the destination port (in
+	 * another bridge) is blocked, the packet is dropped instead of being
+	 * flooded to the "bridged" ports, so we need to trap and handle those
+	 * packets in software.
+	 *
+	 * If there is no more than one bridge, we might be able to drop them
+	 * directly when certain conditions are met, but for now we trap them
+	 * in all cases.
+	 */
+ ctrl = 0;
+ for (int i = 0; i < YT921X_PORT_NUM; i++)
+ ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i);
+	/* CPU ports are the exception: any packet sent via a CPU port without
+	 * a tag should be dropped.
+	 */
+ cpu_ports_mask = priv->cpu_ports_mask;
+ for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) {
+ ctrl &= ~YT921X_ACT_UNK_ACTn_M(port);
+ ctrl |= YT921X_ACT_UNK_ACTn_DROP(port);
+ }
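+	/* Worked example, for illustration only: assuming a single CPU port
+	 * at index 9, the trap loop above yields ctrl = 0x155555 (action 01
+	 * in each 2-bit per-port field); clearing port 9's field and setting
+	 * DROP (action 10) gives ctrl = 0x195555.
+	 */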
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl);
+ if (res)
+ return res;
+ res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl);
+ if (res)
+ return res;
+
+	/* Frames tagged with VID 0 should be treated as untagged, which
+	 * confuses the hardware a lot
+	 */
+ ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M;
+ res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64);
+ if (res)
+ return res;
+
+ /* Miscellaneous */
+ res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP);
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static int yt921x_dsa_setup(struct dsa_switch *ds)
+{
+ struct yt921x_priv *priv = to_yt921x_priv(ds);
+ struct device *dev = to_device(priv);
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int res;
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_reset(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+	/* Register the internal mdio bus. Nodes for internal ports should
+	 * have a proper phy-handle pointing to their PHYs. Leaving the
+	 * internal bus disabled is possible, though pretty weird, if the
+	 * internal ports are not used.
+	 */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ res = yt921x_mbus_int_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+ }
+
+ /* External mdio bus is optional */
+ child = of_get_child_by_name(np, "mdio-external");
+ if (child) {
+ res = yt921x_mbus_ext_init(priv, child);
+ of_node_put(child);
+ if (res)
+ return res;
+
+ dev_err(dev, "Untested external mdio bus\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&priv->reg_lock);
+ res = yt921x_chip_setup(priv);
+ mutex_unlock(&priv->reg_lock);
+
+ if (res)
+ return res;
+
+ return 0;
+}
+
+static const struct phylink_mac_ops yt921x_phylink_mac_ops = {
+ .mac_link_down = yt921x_phylink_mac_link_down,
+ .mac_link_up = yt921x_phylink_mac_link_up,
+ .mac_config = yt921x_phylink_mac_config,
+};
+
+static const struct dsa_switch_ops yt921x_dsa_switch_ops = {
+ /* mib */
+ .get_strings = yt921x_dsa_get_strings,
+ .get_ethtool_stats = yt921x_dsa_get_ethtool_stats,
+ .get_sset_count = yt921x_dsa_get_sset_count,
+ .get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats,
+ .get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats,
+ .get_rmon_stats = yt921x_dsa_get_rmon_stats,
+ .get_stats64 = yt921x_dsa_get_stats64,
+ .get_pause_stats = yt921x_dsa_get_pause_stats,
+ /* eee */
+ .support_eee = dsa_supports_eee,
+ .set_mac_eee = yt921x_dsa_set_mac_eee,
+ /* mtu */
+ .port_change_mtu = yt921x_dsa_port_change_mtu,
+ .port_max_mtu = yt921x_dsa_port_max_mtu,
+ /* hsr */
+ .port_hsr_leave = dsa_port_simple_hsr_leave,
+ .port_hsr_join = dsa_port_simple_hsr_join,
+ /* mirror */
+ .port_mirror_del = yt921x_dsa_port_mirror_del,
+ .port_mirror_add = yt921x_dsa_port_mirror_add,
+ /* fdb */
+ .port_fdb_dump = yt921x_dsa_port_fdb_dump,
+ .port_fast_age = yt921x_dsa_port_fast_age,
+ .set_ageing_time = yt921x_dsa_set_ageing_time,
+ .port_fdb_del = yt921x_dsa_port_fdb_del,
+ .port_fdb_add = yt921x_dsa_port_fdb_add,
+ .port_mdb_del = yt921x_dsa_port_mdb_del,
+ .port_mdb_add = yt921x_dsa_port_mdb_add,
+ /* vlan */
+ .port_vlan_filtering = yt921x_dsa_port_vlan_filtering,
+ .port_vlan_del = yt921x_dsa_port_vlan_del,
+ .port_vlan_add = yt921x_dsa_port_vlan_add,
+ /* bridge */
+ .port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags,
+ .port_bridge_flags = yt921x_dsa_port_bridge_flags,
+ .port_bridge_leave = yt921x_dsa_port_bridge_leave,
+ .port_bridge_join = yt921x_dsa_port_bridge_join,
+ /* mst */
+ .port_mst_state_set = yt921x_dsa_port_mst_state_set,
+ .vlan_msti_set = yt921x_dsa_vlan_msti_set,
+ .port_stp_state_set = yt921x_dsa_port_stp_state_set,
+ /* port */
+ .get_tag_protocol = yt921x_dsa_get_tag_protocol,
+ .phylink_get_caps = yt921x_dsa_phylink_get_caps,
+ .port_setup = yt921x_dsa_port_setup,
+ /* chip */
+ .setup = yt921x_dsa_setup,
+};
+
+static void yt921x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+}
+
+static void yt921x_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev);
+
+ if (!priv)
+ return;
+
+ for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ disable_delayed_work_sync(&pp->mib_read);
+ }
+
+ dsa_unregister_switch(&priv->ds);
+
+ mutex_destroy(&priv->reg_lock);
+}
+
+static int yt921x_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct yt921x_reg_mdio *mdio;
+ struct yt921x_priv *priv;
+ struct dsa_switch *ds;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL);
+ if (!mdio)
+ return -ENOMEM;
+
+ mdio->bus = mdiodev->bus;
+ mdio->addr = mdiodev->addr;
+ mdio->switchid = 0;
+
+ mutex_init(&priv->reg_lock);
+
+ priv->reg_ops = &yt921x_reg_ops_mdio;
+ priv->reg_ctx = mdio;
+
+ for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) {
+ struct yt921x_port *pp = &priv->ports[i];
+
+ pp->index = i;
+ INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib);
+ }
+
+ ds = &priv->ds;
+ ds->dev = dev;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->priv = priv;
+ ds->ops = &yt921x_dsa_switch_ops;
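+	/* The hardware ageing interval is a 16-bit count apparently in units
+	 * of 5 s (5000 ms); see YT921X_AGEING. Hence these bounds.
+	 */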
+ ds->ageing_time_min = 1 * 5000;
+ ds->ageing_time_max = U16_MAX * 5000;
+ ds->phylink_mac_ops = &yt921x_phylink_mac_ops;
+ ds->num_ports = YT921X_PORT_NUM;
+
+ mdiodev_set_drvdata(mdiodev, priv);
+
+ return dsa_register_switch(ds);
+}
+
+static const struct of_device_id yt921x_of_match[] = {
+ { .compatible = "motorcomm,yt9215" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, yt921x_of_match);
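+/* A minimal devicetree sketch, for orientation only (the unit address and
+ * layout are illustrative assumptions, not taken from a binding document;
+ * the "mdio" and optional "mdio-external" child names are the ones consumed
+ * by yt921x_dsa_setup()):
+ *
+ *	switch@1d {
+ *		compatible = "motorcomm,yt9215";
+ *		reg = <0x1d>;
+ *		mdio {
+ *			(PHY nodes for the internal ports)
+ *		};
+ *	};
+ */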
+
+static struct mdio_driver yt921x_mdio_driver = {
+ .probe = yt921x_mdio_probe,
+ .remove = yt921x_mdio_remove,
+ .shutdown = yt921x_mdio_shutdown,
+ .mdiodrv.driver = {
+ .name = YT921X_NAME,
+ .of_match_table = yt921x_of_match,
+ },
+};
+
+mdio_module_driver(yt921x_mdio_driver);
+
+MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>");
+MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h
new file mode 100644
index 000000000000..61bb0ab3b09a
--- /dev/null
+++ b/drivers/net/dsa/yt921x.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 David Yang
+ */
+
+#ifndef __YT921X_H
+#define __YT921X_H
+
+#include <net/dsa.h>
+
+#define YT921X_SMI_SWITCHID_M GENMASK(3, 2)
+#define YT921X_SMI_SWITCHID(x) FIELD_PREP(YT921X_SMI_SWITCHID_M, (x))
+#define YT921X_SMI_AD BIT(1)
+#define YT921X_SMI_ADDR 0
+#define YT921X_SMI_DATA YT921X_SMI_AD
+#define YT921X_SMI_RW BIT(0)
+#define YT921X_SMI_WRITE 0
+#define YT921X_SMI_READ YT921X_SMI_RW
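+/* The bits above are presumably encoded into the 5-bit MDIO register-number
+ * field: a 32-bit switch register access would consist of an address phase
+ * (YT921X_SMI_ADDR) followed by a data phase (YT921X_SMI_DATA), with
+ * YT921X_SMI_READ / YT921X_SMI_WRITE selecting the direction. This is a
+ * sketch inferred from the definitions, not from vendor documentation.
+ */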
+
+#define YT921X_SWITCHID_NUM 4
+
+#define YT921X_RST 0x80000
+#define YT921X_RST_HW BIT(31)
+#define YT921X_RST_SW BIT(1)
+#define YT921X_FUNC 0x80004
+#define YT921X_FUNC_MIB BIT(1)
+#define YT921X_CHIP_ID 0x80008
+#define YT921X_CHIP_ID_MAJOR GENMASK(31, 16)
+#define YT921X_EXT_CPU_PORT 0x8000c
+#define YT921X_EXT_CPU_PORT_TAG_EN BIT(15)
+#define YT921X_EXT_CPU_PORT_PORT_EN BIT(14)
+#define YT921X_EXT_CPU_PORT_PORT_M GENMASK(3, 0)
+#define YT921X_EXT_CPU_PORT_PORT(x) FIELD_PREP(YT921X_EXT_CPU_PORT_PORT_M, (x))
+#define YT921X_CPU_TAG_TPID 0x80010
+#define YT921X_CPU_TAG_TPID_TPID_M GENMASK(15, 0)
+/* Same as ETH_P_YT921X, but this represents the true HW default, while the
+ * former is a local convention chosen by us.
+ */
+#define YT921X_CPU_TAG_TPID_TPID_DEFAULT 0x9988
+#define YT921X_PVID_SEL 0x80014
+#define YT921X_PVID_SEL_SVID_PORTn(port) BIT(port)
+#define YT921X_SERDES_CTRL 0x80028
+#define YT921X_SERDES_CTRL_PORTn_TEST(port) BIT((port) - 3)
+#define YT921X_SERDES_CTRL_PORTn(port) BIT((port) - 8)
+#define YT921X_IO_LEVEL 0x80030
+#define YT9215_IO_LEVEL_NORMAL_M GENMASK(5, 4)
+#define YT9215_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9215_IO_LEVEL_NORMAL_M, (x))
+#define YT9215_IO_LEVEL_NORMAL_3V3 YT9215_IO_LEVEL_NORMAL(0)
+#define YT9215_IO_LEVEL_NORMAL_1V8 YT9215_IO_LEVEL_NORMAL(3)
+#define YT9215_IO_LEVEL_RGMII1_M GENMASK(3, 2)
+#define YT9215_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII1_M, (x))
+#define YT9215_IO_LEVEL_RGMII1_3V3 YT9215_IO_LEVEL_RGMII1(0)
+#define YT9215_IO_LEVEL_RGMII1_2V5 YT9215_IO_LEVEL_RGMII1(1)
+#define YT9215_IO_LEVEL_RGMII1_1V8 YT9215_IO_LEVEL_RGMII1(2)
+#define YT9215_IO_LEVEL_RGMII0_M GENMASK(1, 0)
+#define YT9215_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII0_M, (x))
+#define YT9215_IO_LEVEL_RGMII0_3V3 YT9215_IO_LEVEL_RGMII0(0)
+#define YT9215_IO_LEVEL_RGMII0_2V5 YT9215_IO_LEVEL_RGMII0(1)
+#define YT9215_IO_LEVEL_RGMII0_1V8 YT9215_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_RGMII1_M GENMASK(5, 4)
+#define YT9218_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII1_M, (x))
+#define YT9218_IO_LEVEL_RGMII1_3V3 YT9218_IO_LEVEL_RGMII1(0)
+#define YT9218_IO_LEVEL_RGMII1_2V5 YT9218_IO_LEVEL_RGMII1(1)
+#define YT9218_IO_LEVEL_RGMII1_1V8 YT9218_IO_LEVEL_RGMII1(2)
+#define YT9218_IO_LEVEL_RGMII0_M GENMASK(3, 2)
+#define YT9218_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII0_M, (x))
+#define YT9218_IO_LEVEL_RGMII0_3V3 YT9218_IO_LEVEL_RGMII0(0)
+#define YT9218_IO_LEVEL_RGMII0_2V5 YT9218_IO_LEVEL_RGMII0(1)
+#define YT9218_IO_LEVEL_RGMII0_1V8 YT9218_IO_LEVEL_RGMII0(2)
+#define YT9218_IO_LEVEL_NORMAL_M GENMASK(1, 0)
+#define YT9218_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9218_IO_LEVEL_NORMAL_M, (x))
+#define YT9218_IO_LEVEL_NORMAL_3V3 YT9218_IO_LEVEL_NORMAL(0)
+#define YT9218_IO_LEVEL_NORMAL_1V8 YT9218_IO_LEVEL_NORMAL(3)
+#define YT921X_MAC_ADDR_HI2 0x80080
+#define YT921X_MAC_ADDR_LO4 0x80084
+#define YT921X_SERDESn(port) (0x8008c + 4 * ((port) - 8))
+#define YT921X_SERDES_MODE_M GENMASK(9, 7)
+#define YT921X_SERDES_MODE(x) FIELD_PREP(YT921X_SERDES_MODE_M, (x))
+#define YT921X_SERDES_MODE_SGMII YT921X_SERDES_MODE(0)
+#define YT921X_SERDES_MODE_REVSGMII YT921X_SERDES_MODE(1)
+#define YT921X_SERDES_MODE_1000BASEX YT921X_SERDES_MODE(2)
+#define YT921X_SERDES_MODE_100BASEX YT921X_SERDES_MODE(3)
+#define YT921X_SERDES_MODE_2500BASEX YT921X_SERDES_MODE(4)
+#define YT921X_SERDES_RX_PAUSE BIT(6)
+#define YT921X_SERDES_TX_PAUSE BIT(5)
+#define YT921X_SERDES_LINK BIT(4) /* force link */
+#define YT921X_SERDES_DUPLEX_FULL BIT(3)
+#define YT921X_SERDES_SPEED_M GENMASK(2, 0)
+#define YT921X_SERDES_SPEED(x) FIELD_PREP(YT921X_SERDES_SPEED_M, (x))
+#define YT921X_SERDES_SPEED_10 YT921X_SERDES_SPEED(0)
+#define YT921X_SERDES_SPEED_100 YT921X_SERDES_SPEED(1)
+#define YT921X_SERDES_SPEED_1000 YT921X_SERDES_SPEED(2)
+#define YT921X_SERDES_SPEED_10000 YT921X_SERDES_SPEED(3)
+#define YT921X_SERDES_SPEED_2500 YT921X_SERDES_SPEED(4)
+#define YT921X_PORTn_CTRL(port) (0x80100 + 4 * (port))
+#define YT921X_PORT_CTRL_PAUSE_AN BIT(10)
+#define YT921X_PORTn_STATUS(port) (0x80200 + 4 * (port))
+#define YT921X_PORT_LINK BIT(9) /* CTRL: auto negotiation */
+#define YT921X_PORT_HALF_PAUSE BIT(8) /* Half-duplex back pressure mode */
+#define YT921X_PORT_DUPLEX_FULL BIT(7)
+#define YT921X_PORT_RX_PAUSE BIT(6)
+#define YT921X_PORT_TX_PAUSE BIT(5)
+#define YT921X_PORT_RX_MAC_EN BIT(4)
+#define YT921X_PORT_TX_MAC_EN BIT(3)
+#define YT921X_PORT_SPEED_M GENMASK(2, 0)
+#define YT921X_PORT_SPEED(x) FIELD_PREP(YT921X_PORT_SPEED_M, (x))
+#define YT921X_PORT_SPEED_10 YT921X_PORT_SPEED(0)
+#define YT921X_PORT_SPEED_100 YT921X_PORT_SPEED(1)
+#define YT921X_PORT_SPEED_1000 YT921X_PORT_SPEED(2)
+#define YT921X_PORT_SPEED_10000 YT921X_PORT_SPEED(3)
+#define YT921X_PORT_SPEED_2500 YT921X_PORT_SPEED(4)
+#define YT921X_PON_STRAP_FUNC 0x80320
+#define YT921X_PON_STRAP_VAL 0x80324
+#define YT921X_PON_STRAP_CAP 0x80328
+#define YT921X_PON_STRAP_EEE BIT(16)
+#define YT921X_PON_STRAP_LOOP_DETECT BIT(7)
+#define YT921X_MDIO_POLLINGn(port) (0x80364 + 4 * ((port) - 8))
+#define YT921X_MDIO_POLLING_DUPLEX_FULL BIT(4)
+#define YT921X_MDIO_POLLING_LINK BIT(3)
+#define YT921X_MDIO_POLLING_SPEED_M GENMASK(2, 0)
+#define YT921X_MDIO_POLLING_SPEED(x) FIELD_PREP(YT921X_MDIO_POLLING_SPEED_M, (x))
+#define YT921X_MDIO_POLLING_SPEED_10 YT921X_MDIO_POLLING_SPEED(0)
+#define YT921X_MDIO_POLLING_SPEED_100 YT921X_MDIO_POLLING_SPEED(1)
+#define YT921X_MDIO_POLLING_SPEED_1000 YT921X_MDIO_POLLING_SPEED(2)
+#define YT921X_MDIO_POLLING_SPEED_10000 YT921X_MDIO_POLLING_SPEED(3)
+#define YT921X_MDIO_POLLING_SPEED_2500 YT921X_MDIO_POLLING_SPEED(4)
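+/* Note: the same speed encoding as YT921X_PORT_SPEED / YT921X_SERDES_SPEED,
+ * where 2.5G (4) is numbered after 10G (3).
+ */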
+#define YT921X_SENSOR 0x8036c
+#define YT921X_SENSOR_TEMP BIT(18)
+#define YT921X_TEMP 0x80374
+#define YT921X_CHIP_MODE 0x80388
+#define YT921X_CHIP_MODE_MODE GENMASK(1, 0)
+#define YT921X_XMII_CTRL 0x80394
+#define YT921X_XMII_CTRL_PORTn(port) BIT(9 - (port)) /* Yes, it's reversed */
+#define YT921X_XMIIn(port) (0x80400 + 8 * ((port) - 8))
+#define YT921X_XMII_MODE_M GENMASK(31, 29)
+#define YT921X_XMII_MODE(x) FIELD_PREP(YT921X_XMII_MODE_M, (x))
+#define YT921X_XMII_MODE_MII YT921X_XMII_MODE(0)
+#define YT921X_XMII_MODE_REVMII YT921X_XMII_MODE(1)
+#define YT921X_XMII_MODE_RMII YT921X_XMII_MODE(2)
+#define YT921X_XMII_MODE_REVRMII YT921X_XMII_MODE(3)
+#define YT921X_XMII_MODE_RGMII YT921X_XMII_MODE(4)
+#define YT921X_XMII_MODE_DISABLE YT921X_XMII_MODE(5)
+#define YT921X_XMII_LINK BIT(19) /* force link */
+#define YT921X_XMII_EN BIT(18)
+#define YT921X_XMII_SOFT_RST BIT(17)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS_M GENMASK(16, 13)
+#define YT921X_XMII_RGMII_TX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_TX_DELAY_150PS_M, (x))
+#define YT921X_XMII_TX_CLK_IN BIT(11)
+#define YT921X_XMII_RX_CLK_IN BIT(10)
+#define YT921X_XMII_RGMII_TX_DELAY_2NS BIT(8)
+#define YT921X_XMII_RGMII_TX_CLK_OUT BIT(7)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS_M GENMASK(6, 3)
+#define YT921X_XMII_RGMII_RX_DELAY_150PS(x) FIELD_PREP(YT921X_XMII_RGMII_RX_DELAY_150PS_M, (x))
+#define YT921X_XMII_RMII_PHY_TX_CLK_OUT BIT(2)
+#define YT921X_XMII_REVMII_TX_CLK_OUT BIT(1)
+#define YT921X_XMII_REVMII_RX_CLK_OUT BIT(0)
+
+#define YT921X_MACn_FRAME(port) (0x81008 + 0x1000 * (port))
+#define YT921X_MAC_FRAME_SIZE_M GENMASK(21, 8)
+#define YT921X_MAC_FRAME_SIZE(x) FIELD_PREP(YT921X_MAC_FRAME_SIZE_M, (x))
+
+#define YT921X_EEEn_VAL(port) (0xa0000 + 0x40 * (port))
+#define YT921X_EEE_VAL_DATA BIT(1)
+
+#define YT921X_EEE_CTRL 0xb0000
+#define YT921X_EEE_CTRL_ENn(port) BIT(port)
+
+#define YT921X_MIB_CTRL 0xc0004
+#define YT921X_MIB_CTRL_CLEAN BIT(30)
+#define YT921X_MIB_CTRL_PORT_M GENMASK(6, 3)
+#define YT921X_MIB_CTRL_PORT(x) FIELD_PREP(YT921X_MIB_CTRL_PORT_M, (x))
+#define YT921X_MIB_CTRL_ONE_PORT BIT(1)
+#define YT921X_MIB_CTRL_ALL_PORT BIT(0)
+#define YT921X_MIBn_DATA0(port) (0xc0100 + 0x100 * (port))
+#define YT921X_MIBn_DATAm(port, x) (YT921X_MIBn_DATA0(port) + 4 * (x))
+#define YT921X_MIB_DATA_RX_BROADCAST 0x00
+#define YT921X_MIB_DATA_RX_PAUSE 0x04
+#define YT921X_MIB_DATA_RX_MULTICAST 0x08
+#define YT921X_MIB_DATA_RX_CRC_ERR 0x0c
+
+#define YT921X_MIB_DATA_RX_ALIGN_ERR 0x10
+#define YT921X_MIB_DATA_RX_UNDERSIZE_ERR 0x14
+#define YT921X_MIB_DATA_RX_FRAG_ERR 0x18
+#define YT921X_MIB_DATA_RX_PKT_SZ_64 0x1c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127 0x20
+#define YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255 0x24
+#define YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511 0x28
+#define YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023 0x2c
+
+#define YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518 0x30
+#define YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX 0x34
+/* 0x38: unused */
+#define YT921X_MIB_DATA_RX_GOOD_BYTES 0x3c
+
+/* 0x40: 64 bytes */
+#define YT921X_MIB_DATA_RX_BAD_BYTES 0x44
+/* 0x48: 64 bytes */
+#define YT921X_MIB_DATA_RX_OVERSIZE_ERR 0x4c
+
+#define YT921X_MIB_DATA_RX_DROPPED 0x50
+#define YT921X_MIB_DATA_TX_BROADCAST 0x54
+#define YT921X_MIB_DATA_TX_PAUSE 0x58
+#define YT921X_MIB_DATA_TX_MULTICAST 0x5c
+
+#define YT921X_MIB_DATA_TX_UNDERSIZE_ERR 0x60
+#define YT921X_MIB_DATA_TX_PKT_SZ_64 0x64
+#define YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127 0x68
+#define YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255 0x6c
+
+#define YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511 0x70
+#define YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023 0x74
+#define YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518 0x78
+#define YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX 0x7c
+
+/* 0x80: unused */
+#define YT921X_MIB_DATA_TX_GOOD_BYTES 0x84
+/* 0x88: 64 bytes */
+#define YT921X_MIB_DATA_TX_COLLISION 0x8c
+
+#define YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION 0x90
+#define YT921X_MIB_DATA_TX_MULTIPLE_COLLISION 0x94
+#define YT921X_MIB_DATA_TX_SINGLE_COLLISION 0x98
+#define YT921X_MIB_DATA_TX_PKT 0x9c
+
+#define YT921X_MIB_DATA_TX_DEFERRED 0xa0
+#define YT921X_MIB_DATA_TX_LATE_COLLISION 0xa4
+#define YT921X_MIB_DATA_RX_OAM 0xa8
+#define YT921X_MIB_DATA_TX_OAM 0xac
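+/* Address example (pure arithmetic from the macros above): port 2's
+ * RX_GOOD_BYTES counter sits at YT921X_MIBn_DATA0(2) +
+ * YT921X_MIB_DATA_RX_GOOD_BYTES = 0xc0100 + 0x200 + 0x3c = 0xc033c.
+ */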
+
+#define YT921X_EDATA_CTRL 0xe0000
+#define YT921X_EDATA_CTRL_ADDR_M GENMASK(15, 8)
+#define YT921X_EDATA_CTRL_ADDR(x) FIELD_PREP(YT921X_EDATA_CTRL_ADDR_M, (x))
+#define YT921X_EDATA_CTRL_OP_M GENMASK(3, 0)
+#define YT921X_EDATA_CTRL_OP(x) FIELD_PREP(YT921X_EDATA_CTRL_OP_M, (x))
+#define YT921X_EDATA_CTRL_READ YT921X_EDATA_CTRL_OP(5)
+#define YT921X_EDATA_DATA 0xe0004
+#define YT921X_EDATA_DATA_DATA_M GENMASK(31, 24)
+#define YT921X_EDATA_DATA_STATUS_M GENMASK(3, 0)
+#define YT921X_EDATA_DATA_STATUS(x) FIELD_PREP(YT921X_EDATA_DATA_STATUS_M, (x))
+#define YT921X_EDATA_DATA_IDLE YT921X_EDATA_DATA_STATUS(3)
+
+#define YT921X_EXT_MBUS_OP 0x6a000
+#define YT921X_INT_MBUS_OP 0xf0000
+#define YT921X_MBUS_OP_START BIT(0)
+#define YT921X_EXT_MBUS_CTRL 0x6a004
+#define YT921X_INT_MBUS_CTRL 0xf0004
+#define YT921X_MBUS_CTRL_PORT_M GENMASK(25, 21)
+#define YT921X_MBUS_CTRL_PORT(x) FIELD_PREP(YT921X_MBUS_CTRL_PORT_M, (x))
+#define YT921X_MBUS_CTRL_REG_M GENMASK(20, 16)
+#define YT921X_MBUS_CTRL_REG(x) FIELD_PREP(YT921X_MBUS_CTRL_REG_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_M GENMASK(11, 8) /* wild guess */
+#define YT921X_MBUS_CTRL_TYPE(x) FIELD_PREP(YT921X_MBUS_CTRL_TYPE_M, (x))
+#define YT921X_MBUS_CTRL_TYPE_C22 YT921X_MBUS_CTRL_TYPE(4)
+#define YT921X_MBUS_CTRL_OP_M GENMASK(3, 2) /* wild guess */
+#define YT921X_MBUS_CTRL_OP(x) FIELD_PREP(YT921X_MBUS_CTRL_OP_M, (x))
+#define YT921X_MBUS_CTRL_WRITE YT921X_MBUS_CTRL_OP(1)
+#define YT921X_MBUS_CTRL_READ YT921X_MBUS_CTRL_OP(2)
+#define YT921X_EXT_MBUS_DOUT 0x6a008
+#define YT921X_INT_MBUS_DOUT 0xf0008
+#define YT921X_EXT_MBUS_DIN 0x6a00c
+#define YT921X_INT_MBUS_DIN 0xf000c
+
+#define YT921X_PORTn_EGR(port) (0x100000 + 4 * (port))
+#define YT921X_PORT_EGR_TPID_CTAG_M GENMASK(5, 4)
+#define YT921X_PORT_EGR_TPID_CTAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_CTAG_M, (x))
+#define YT921X_PORT_EGR_TPID_STAG_M GENMASK(3, 2)
+#define YT921X_PORT_EGR_TPID_STAG(x) FIELD_PREP(YT921X_PORT_EGR_TPID_STAG_M, (x))
+#define YT921X_TPID_EGRn(x) (0x100300 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_EGR_TPID_M GENMASK(15, 0)
+
+#define YT921X_VLAN_IGR_FILTER 0x180280
+#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port) BIT((port) + 11)
+#define YT921X_VLAN_IGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_PORTn_ISOLATION(port) (0x180294 + 4 * (port))
+#define YT921X_PORT_ISOLATION_BLOCKn(port) BIT(port)
+#define YT921X_STPn(n) (0x18038c + 4 * (n))
+#define YT921X_STP_PORTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port)))
+#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0)
+#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 1)
+#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2)
+#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3)
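+/* Each port occupies a 2-bit field: e.g. putting port 3 into FORWARD writes
+ * value 3 to bits 7:6, i.e. YT921X_STP_PORTn(3, 3) == 0xc0 (worked example,
+ * for illustration only).
+ */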
+#define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port))
+#define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22)
+#define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21)
+#define YT921X_PORT_LEARN_VID_LEARN_EN BIT(20)
+#define YT921X_PORT_LEARN_SUSPEND_COPY_EN BIT(19)
+#define YT921X_PORT_LEARN_SUSPEND_DROP_EN BIT(18)
+#define YT921X_PORT_LEARN_DIS BIT(17)
+#define YT921X_PORT_LEARN_LIMIT_EN BIT(16)
+#define YT921X_PORT_LEARN_LIMIT_M GENMASK(15, 8)
+#define YT921X_PORT_LEARN_LIMIT(x) FIELD_PREP(YT921X_PORT_LEARN_LIMIT_M, (x))
+#define YT921X_PORT_LEARN_DROP_ON_EXCEEDED BIT(2)
+#define YT921X_PORT_LEARN_MODE_M GENMASK(1, 0)
+#define YT921X_PORT_LEARN_MODE(x) FIELD_PREP(YT921X_PORT_LEARN_MODE_M, (x))
+#define YT921X_PORT_LEARN_MODE_AUTO YT921X_PORT_LEARN_MODE(0)
+#define YT921X_PORT_LEARN_MODE_AUTO_AND_COPY YT921X_PORT_LEARN_MODE(1)
+#define YT921X_PORT_LEARN_MODE_CPU_CONTROL YT921X_PORT_LEARN_MODE(2)
+#define YT921X_AGEING 0x180440
+#define YT921X_AGEING_INTERVAL_M GENMASK(15, 0)
+#define YT921X_FDB_IN0 0x180454
+#define YT921X_FDB_IN1 0x180458
+#define YT921X_FDB_IN2 0x18045c
+#define YT921X_FDB_OP 0x180460
+#define YT921X_FDB_OP_INDEX_M GENMASK(22, 11)
+#define YT921X_FDB_OP_INDEX(x) FIELD_PREP(YT921X_FDB_OP_INDEX_M, (x))
+#define YT921X_FDB_OP_MODE_INDEX BIT(10) /* mac+fid / index */
+#define YT921X_FDB_OP_FLUSH_MCAST BIT(9) /* ucast / mcast */
+#define YT921X_FDB_OP_FLUSH_M GENMASK(8, 7)
+#define YT921X_FDB_OP_FLUSH(x) FIELD_PREP(YT921X_FDB_OP_FLUSH_M, (x))
+#define YT921X_FDB_OP_FLUSH_ALL YT921X_FDB_OP_FLUSH(0)
+#define YT921X_FDB_OP_FLUSH_PORT YT921X_FDB_OP_FLUSH(1)
+#define YT921X_FDB_OP_FLUSH_PORT_VID YT921X_FDB_OP_FLUSH(2)
+#define YT921X_FDB_OP_FLUSH_VID YT921X_FDB_OP_FLUSH(3)
+#define YT921X_FDB_OP_FLUSH_STATIC BIT(6)
+#define YT921X_FDB_OP_NEXT_TYPE_M GENMASK(5, 4)
+#define YT921X_FDB_OP_NEXT_TYPE(x) FIELD_PREP(YT921X_FDB_OP_NEXT_TYPE_M, (x))
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT YT921X_FDB_OP_NEXT_TYPE(0)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST_VID YT921X_FDB_OP_NEXT_TYPE(1)
+#define YT921X_FDB_OP_NEXT_TYPE_UCAST YT921X_FDB_OP_NEXT_TYPE(2)
+#define YT921X_FDB_OP_NEXT_TYPE_MCAST YT921X_FDB_OP_NEXT_TYPE(3)
+#define YT921X_FDB_OP_OP_M GENMASK(3, 1)
+#define YT921X_FDB_OP_OP(x) FIELD_PREP(YT921X_FDB_OP_OP_M, (x))
+#define YT921X_FDB_OP_OP_ADD YT921X_FDB_OP_OP(0)
+#define YT921X_FDB_OP_OP_DEL YT921X_FDB_OP_OP(1)
+#define YT921X_FDB_OP_OP_GET_ONE YT921X_FDB_OP_OP(2)
+#define YT921X_FDB_OP_OP_GET_NEXT YT921X_FDB_OP_OP(3)
+#define YT921X_FDB_OP_OP_FLUSH YT921X_FDB_OP_OP(4)
+#define YT921X_FDB_OP_START BIT(0)
+#define YT921X_FDB_RESULT 0x180464
+#define YT921X_FDB_RESULT_DONE BIT(15)
+#define YT921X_FDB_RESULT_NOTFOUND BIT(14)
+#define YT921X_FDB_RESULT_OVERWRITED BIT(13)
+#define YT921X_FDB_RESULT_INDEX_M GENMASK(11, 0)
+#define YT921X_FDB_RESULT_INDEX(x) FIELD_PREP(YT921X_FDB_RESULT_INDEX_M, (x))
+#define YT921X_FDB_OUT0 0x1804b0
+#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0)
+#define YT921X_FDB_OUT1 0x1804b4
+#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31)
+#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28)
+#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x))
+#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0)
+#define YT921X_FDB_IO1_STATUS_MIN_TIME YT921X_FDB_IO1_STATUS(1)
+#define YT921X_FDB_IO1_STATUS_MOVE_AGING_MAX_TIME YT921X_FDB_IO1_STATUS(3)
+#define YT921X_FDB_IO1_STATUS_MAX_TIME YT921X_FDB_IO1_STATUS(5)
+#define YT921X_FDB_IO1_STATUS_PENDING YT921X_FDB_IO1_STATUS(6)
+#define YT921X_FDB_IO1_STATUS_STATIC YT921X_FDB_IO1_STATUS(7)
+#define YT921X_FDB_IO1_FID_M GENMASK(27, 16) /* filtering ID (VID) */
+#define YT921X_FDB_IO1_FID(x) FIELD_PREP(YT921X_FDB_IO1_FID_M, (x))
+#define YT921X_FDB_IO1_ADDR_LO2_M GENMASK(15, 0)
+#define YT921X_FDB_OUT2 0x1804b8
+#define YT921X_FDB_IO2_MOVE_AGING_STATUS_M GENMASK(31, 30)
+#define YT921X_FDB_IO2_IGR_DROP BIT(29)
+#define YT921X_FDB_IO2_EGR_PORTS_M GENMASK(28, 18)
+#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x))
+#define YT921X_FDB_IO2_EGR_DROP BIT(17)
+#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16)
+#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15)
+#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12)
+#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x))
+#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0)
+#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x))
+#define YT921X_FILTER_UNK_UCAST 0x180508
+#define YT921X_FILTER_UNK_MCAST 0x18050c
+#define YT921X_FILTER_MCAST 0x180510
+#define YT921X_FILTER_BCAST 0x180514
+#define YT921X_FILTER_PORTS_M GENMASK(10, 0)
+#define YT921X_FILTER_PORTS(x) FIELD_PREP(YT921X_FILTER_PORTS_M, (x))
+#define YT921X_FILTER_PORTn(port) BIT(port)
+#define YT921X_VLAN_EGR_FILTER 0x180598
+#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port)
+#define YT921X_CPU_COPY 0x180690
+#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2)
+#define YT921X_CPU_COPY_TO_INT_CPU BIT(1)
+#define YT921X_CPU_COPY_TO_EXT_CPU BIT(0)
+#define YT921X_ACT_UNK_UCAST 0x180734
+#define YT921X_ACT_UNK_MCAST 0x180738
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_RMA BIT(23)
+#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_IGMP BIT(22)
+#define YT921X_ACT_UNK_ACTn_M(port) GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_ACT_UNK_ACTn(port, x) ((x) << (2 * (port)))
+#define YT921X_ACT_UNK_ACTn_FORWARD(port) YT921X_ACT_UNK_ACTn(port, 0) /* flood */
+#define YT921X_ACT_UNK_ACTn_TRAP(port) YT921X_ACT_UNK_ACTn(port, 1) /* steer to CPU */
+#define YT921X_ACT_UNK_ACTn_DROP(port) YT921X_ACT_UNK_ACTn(port, 2) /* discard */
+/* NEVER use this action; see comments in the tag driver */
+#define YT921X_ACT_UNK_ACTn_COPY(port) YT921X_ACT_UNK_ACTn(port, 3) /* flood and copy */
+#define YT921X_FDB_HW_FLUSH 0x180958
+#define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0)
+
+#define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan))
+#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40)
+#define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40)
+#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36)
+#define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x))
+#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35)
+#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23)
+#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x))
+#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22)
+#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21)
+#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18)
+#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7)
+#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x))
+#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7)
+#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6)
+#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5)
+#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0)
+
+#define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */
+#define YT921X_TPID_IGR_TPID_M GENMASK(15, 0)
+#define YT921X_PORTn_IGR_TPID(port) (0x210010 + 4 * (port))
+#define YT921X_PORT_IGR_TPIDn_STAG_M GENMASK(7, 4)
+#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4)
+#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0)
+#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x)
+
+#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30)
+#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18)
+#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6)
+#define YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x))
+#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3)
+#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0)
+#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port))
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8)
+#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_TAGGED BIT(3)
+#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_UNTAGGED BIT(2)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED BIT(1)
+#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED BIT(0)
+
+#define YT921X_MIRROR 0x300300
+#define YT921X_MIRROR_IGR_PORTS_M GENMASK(26, 16)
+#define YT921X_MIRROR_IGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_IGR_PORTS_M, (x))
+#define YT921X_MIRROR_IGR_PORTn(port) BIT((port) + 16)
+#define YT921X_MIRROR_EGR_PORTS_M GENMASK(14, 4)
+#define YT921X_MIRROR_EGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_EGR_PORTS_M, (x))
+#define YT921X_MIRROR_EGR_PORTn(port) BIT((port) + 4)
+#define YT921X_MIRROR_PORT_M GENMASK(3, 0)
+#define YT921X_MIRROR_PORT(x) FIELD_PREP(YT921X_MIRROR_PORT_M, (x))
+
+#define YT921X_EDATA_EXTMODE 0xfb
+#define YT921X_EDATA_LEN 0x100
+
+#define YT921X_FDB_NUM 4096
+
+enum yt921x_fdb_entry_status {
+ YT921X_FDB_ENTRY_STATUS_INVALID = 0,
+ YT921X_FDB_ENTRY_STATUS_MIN_TIME = 1,
+ YT921X_FDB_ENTRY_STATUS_MOVE_AGING_MAX_TIME = 3,
+ YT921X_FDB_ENTRY_STATUS_MAX_TIME = 5,
+ YT921X_FDB_ENTRY_STATUS_PENDING = 6,
+ YT921X_FDB_ENTRY_STATUS_STATIC = 7,
+};
+
+#define YT921X_MSTI_NUM 16
+
+#define YT9215_MAJOR 0x9002
+#define YT9218_MAJOR 0x9001
+
+/* required for a hard reset */
+#define YT921X_RST_DELAY_US 10000
+
+#define YT921X_FRAME_SIZE_MAX 0x2400 /* 9216 */
+
+#define YT921X_TAG_LEN 8
+
+/* 8 internal + 2 external + 1 mcu */
+#define YT921X_PORT_NUM 11
+
+#define yt921x_port_is_internal(port) ((port) < 8)
+#define yt921x_port_is_external(port) (8 <= (port) && (port) < 9)
+
+struct yt921x_mib {
+ u64 rx_broadcast;
+ u64 rx_pause;
+ u64 rx_multicast;
+ u64 rx_crc_errors;
+
+ u64 rx_alignment_errors;
+ u64 rx_undersize_errors;
+ u64 rx_fragment_errors;
+ u64 rx_64byte;
+
+ u64 rx_65_127byte;
+ u64 rx_128_255byte;
+ u64 rx_256_511byte;
+ u64 rx_512_1023byte;
+
+ u64 rx_1024_1518byte;
+ u64 rx_jumbo;
+ u64 rx_good_bytes;
+
+ u64 rx_bad_bytes;
+ u64 rx_oversize_errors;
+
+ u64 rx_dropped;
+ u64 tx_broadcast;
+ u64 tx_pause;
+ u64 tx_multicast;
+
+ u64 tx_undersize_errors;
+ u64 tx_64byte;
+ u64 tx_65_127byte;
+ u64 tx_128_255byte;
+
+ u64 tx_256_511byte;
+ u64 tx_512_1023byte;
+ u64 tx_1024_1518byte;
+ u64 tx_jumbo;
+
+ u64 tx_good_bytes;
+ u64 tx_collisions;
+
+ u64 tx_aborted_errors;
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_good;
+
+ u64 tx_deferred;
+ u64 tx_late_collisions;
+ u64 rx_oam;
+ u64 tx_oam;
+};
+
+struct yt921x_port {
+ unsigned char index;
+
+ bool hairpin;
+ bool isolated;
+
+ struct delayed_work mib_read;
+ struct yt921x_mib mib;
+ u64 rx_frames;
+ u64 tx_frames;
+};
+
+struct yt921x_reg_ops {
+ int (*read)(void *context, u32 reg, u32 *valp);
+ int (*write)(void *context, u32 reg, u32 val);
+};
+
+struct yt921x_priv {
+ struct dsa_switch ds;
+
+ const struct yt921x_info *info;
+ /* cache of dsa_cpu_ports(ds) */
+ u16 cpu_ports_mask;
+
+ /* protect the access to the switch registers */
+ struct mutex reg_lock;
+ const struct yt921x_reg_ops *reg_ops;
+ void *reg_ctx;
+
+	/* mdio master buses */
+ struct mii_bus *mbus_int;
+ struct mii_bus *mbus_ext;
+
+ struct yt921x_port ports[YT921X_PORT_NUM];
+
+ u16 eee_ports_mask;
+};
+
+#endif
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..d6bdad4baadd 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
@@ -67,18 +68,12 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
+ netdev_lockdep_set_classes(dev);
return 0;
}
-static void dummy_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -90,7 +85,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
- .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
@@ -99,14 +93,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_change_carrier = dummy_change_carrier,
};
-static void dummy_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-}
-
static const struct ethtool_ops dummy_ethtool_ops = {
- .get_drvinfo = dummy_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
};
@@ -118,14 +105,16 @@ static void dummy_setup(struct net_device *dev)
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
dev->needs_free_netdev = true;
+ dev->request_ops_lock = true;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->lltx = true;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
dev->hw_enc_features |= dev->features;
@@ -181,22 +170,21 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
- down_write(&pernet_ops_rwsem);
- rtnl_lock();
- err = __rtnl_link_register(&dummy_link_ops);
+ err = rtnl_link_register(&dummy_link_ops);
if (err < 0)
- goto out;
+ return err;
+
+ rtnl_net_lock(&init_net);
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
cond_resched();
}
- if (err < 0)
- __rtnl_link_unregister(&dummy_link_ops);
-out:
- rtnl_unlock();
- up_write(&pernet_ops_rwsem);
+ rtnl_net_unlock(&init_net);
+
+ if (err < 0)
+ rtnl_link_unregister(&dummy_link_ops);
return err;
}
@@ -209,4 +197,5 @@ static void __exit dummy_cleanup_module(void)
module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Dummy netdevice driver which discards all packets sent to it");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 8ef34901c2d8..9ba10efd3794 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -143,7 +143,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = from_timer(eql, t, timer);
+ equalizer_t *eql = timer_container_of(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE;
- dev_put(slave->dev);
+ netdev_put(slave->dev, &slave->dev_tracker);
kfree(slave);
}
@@ -254,7 +254,7 @@ static int eql_close(struct net_device *dev)
* at the data structure it scans every so often...
*/
- del_timer_sync(&eql->timer);
+ timer_delete_sync(&eql->timer);
eql_kill_slave_queue(&eql->queue);
@@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
- dev_hold(slave->dev);
+ netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
@@ -608,4 +607,5 @@ static void __exit eql_cleanup_module(void)
module_init(eql_init_module);
module_exit(eql_cleanup_module);
+MODULE_DESCRIPTION("Equalizer Load-balancer for serial network interfaces");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..2227c83a4862 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -31,9 +31,6 @@
Setting to > 1512 effectively disables this feature. */
static int rx_copybreak = 200;
-/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
-static const int mtu = 1500;
-
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
@@ -66,8 +63,10 @@ static int max_interrupt_work = 20;
#include <linux/timer.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
-
#include <linux/uaccess.h>
+
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
@@ -860,7 +859,7 @@ static int corkscrew_open(struct net_device *dev)
static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct corkscrew_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1415,7 +1414,7 @@ static int corkscrew_close(struct net_device *dev)
dev->name, rx_nocopy, rx_copy, queued_packet);
}
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
@@ -1527,7 +1526,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
@@ -1548,9 +1547,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.set_msglevel = netdev_set_msglevel,
};
-
#ifdef MODULE
-void cleanup_module(void)
+static void __exit corkscrew_exit_module(void)
{
while (!list_empty(&root_corkscrew_dev)) {
struct net_device *dev;
@@ -1564,4 +1562,5 @@ void cleanup_module(void)
free_netdev(dev);
}
}
+module_exit(corkscrew_exit_module);
#endif /* MODULE */
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dc3b7c960611..1f2070497a75 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -858,7 +858,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
*/
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
@@ -1140,7 +1140,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..ea49be43b8c3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
{
struct el3_private *lp;
struct net_device *dev;
+ int ret;
dev_dbg(&link->dev, "3c589_attach()\n");
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
dev->ethtool_ops = &netdev_ethtool_ops;
- return tc589_config(link);
+ ret = tc589_config(link);
+ if (ret)
+ goto err_free_netdev;
+
+ return 0;
+
+err_free_netdev:
+ free_netdev(dev);
+ return ret;
}
static void tc589_detach(struct pcmcia_device *link)
@@ -480,7 +489,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -493,7 +502,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else {
@@ -676,7 +685,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
@@ -937,7 +946,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ timer_delete_sync(&lp->media);
return 0;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..8c9cc97efd4e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1302,7 +1302,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if (print_info)
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
- if (dev->irq <= 0 || dev->irq >= nr_irqs)
+ if (dev->irq <= 0 || dev->irq >= irq_get_nr_irqs())
pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);
@@ -1783,7 +1783,7 @@ out:
static void
vortex_timer(struct timer_list *t)
{
- struct vortex_private *vp = from_timer(vp, t, timer);
+ struct vortex_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
@@ -2691,7 +2691,7 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->timer);
+ timer_delete_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 706bd59bf645..1fbab79e2be4 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -44,7 +44,7 @@ config 3C515
config PCMCIA_3C574
tristate "3Com 3c574 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
(PC-card) Fast Ethernet card to your computer.
@@ -54,7 +54,7 @@ config PCMCIA_3C574
config PCMCIA_3C589
tristate "3Com 3c589 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
(PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 05e15b6e5e2c..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -138,11 +138,6 @@ MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
-#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
-#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
-#undef NETIF_F_TSO
-#endif
-
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
@@ -974,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -989,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
@@ -1138,7 +1133,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static void
-typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
@@ -2259,9 +2256,28 @@ out:
return mode;
}
+#if MAX_SKB_FRAGS > 32
+
+#include <net/vxlan.h>
+
+static netdev_features_t typhoon_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags > 32 && skb_is_gso(skb))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
+#if MAX_SKB_FRAGS > 32
+ .ndo_features_check = typhoon_features_check,
+#endif
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
@@ -2276,6 +2292,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2407,8 +2424,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
@@ -2446,7 +2464,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &typhoon_netdev_ops;
- netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &typhoon_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 0e0aa4016858..c5636245f1ca 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index e52264465998..f784a6e2ab0e 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-1.0+ */
+
/* Generic NS8390 register definitions. */
/* This file is part of Donald Becker's 8390 drivers, and is distributed
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6834742057b3..6d429b11e9c6 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a4130e643342..345f250781c6 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_8390
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach an Asix AX88190-based PCMCIA
(PC-card) Fast Ethernet card to your computer. These cards are
@@ -117,7 +117,7 @@ config NE2000
config NE2K_PCI
tristate "PCI NE2000 and clones support (see help)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
help
This driver is for NE2000 compatible PCI cards. It will not work
@@ -146,7 +146,7 @@ config APNE
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach an NE2000 compatible PCMCIA
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 991ad953aa79..828edca8d30c 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200
*
@@ -19,12 +20,6 @@
*
* ----------------------------------------------------------------------------
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- *
- * ----------------------------------------------------------------------------
- *
*/
@@ -615,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..e1695d0fbd8b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
@@ -811,7 +811,7 @@ static int ax_init_dev(struct net_device *dev)
return ret;
}
-static int ax_remove(struct platform_device *pdev)
+static void ax_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ei_device *ei_local = netdev_priv(dev);
@@ -832,8 +832,6 @@ static int ax_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
-
- return 0;
}
/*
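
ax_remove() above is converted from the old int-returning platform .remove callback to the void-returning form; the driver core always ignored the return value, so teardown simply runs unconditionally. A minimal sketch of the new shape, with example_remove() as a hypothetical callback:

    #include <linux/netdevice.h>
    #include <linux/platform_device.h>

    static void example_remove(struct platform_device *pdev)
    {
            struct net_device *dev = platform_get_drvdata(pdev);

            unregister_netdev(dev);         /* no error can be returned here */
            free_netdev(dev);
    }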
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3aef959fc25b..7c8213011b5c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-1.0+
+
/*======================================================================
A PCMCIA ethernet driver for Asix AX88190-based cards
@@ -17,9 +19,7 @@
Written 1992,1993 by Donald Becker.
Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
+ Director, National Security Agency.
Donald Becker may be reached at becker@scyld.com
======================================================================*/
@@ -504,7 +504,7 @@ static int axnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* axnet_close */
@@ -550,7 +550,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct axnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
@@ -650,7 +650,6 @@ static void block_input(struct net_device *dev, int count,
{
unsigned int nic_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
- int xfer_count = count;
char *buf = skb->data;
if ((netif_msg_rx_status(ei_local)) && (count != 4))
@@ -662,9 +661,7 @@ static void block_input(struct net_device *dev, int count,
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
if (count & 0x01) {
buf[count-1] = inb(nic_base + AXNET_DATAPORT);
- xfer_count++;
}
-
}
/*====================================================================*/
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..e876fe52399a 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -258,7 +258,7 @@ static int etherh_set_config(struct net_device *dev, struct ifmap *map)
* media type, turn off automedia detection.
*/
dev->flags &= ~IFF_AUTOMEDIA;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
break;
default:
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 941754ea78ec..fd9dcdc356e6 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
/* New Hydra driver using generic 8390 core */
/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */
-/* This file is subject to the terms and conditions of the GNU General */
-/* Public License. See the file COPYING in the main directory of the */
-/* Linux distribution for more details. */
-
/* Peter De Schrijver (p2@mind.be) */
/* Oldenburg 2000 */
@@ -116,6 +114,7 @@ static int hydra_init(struct zorro_dev *z)
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
+ u8 macaddr[ETH_ALEN];
int j;
int err;
@@ -129,7 +128,8 @@ static int hydra_init(struct zorro_dev *z)
return -ENOMEM;
for (j = 0; j < ETH_ALEN; j++)
- dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ eth_hw_addr_set(dev, macaddr);
/* We must set the 8390 for word mode. */
z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index e84021282edf..84aeb8054304 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-1.0+
+
/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
Written 1992-94 by Donald Becker.
@@ -5,9 +7,6 @@
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 91b04abfd687..4a0a095a1a8a 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike)
Ethernet cards on Linux */
/* Based on the former daynaport.c driver, by Alan Cox. Some code
taken from or inspired by skeleton.c by Donald Becker, acenic.c by
- Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker.
-
- This software may be used and distributed according to the terms of
- the GNU Public License, incorporated herein by reference. */
+ Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker. */
/* 2000-02-28: support added for Dayna and Kinetics cards by
A.G.deWijn@phys.uu.nl */
@@ -292,6 +290,7 @@ static bool mac8390_rsrc_init(struct net_device *dev,
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
+ u8 addr[ETH_ALEN];
dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
@@ -314,7 +313,8 @@ static bool mac8390_rsrc_init(struct net_device *dev,
return false;
}
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ nubus_get_rsrc_mem(addr, &ent, 6);
+ eth_hw_addr_set(dev, addr);
if (useresources[cardtype] == 1) {
nubus_rewinddir(&dir);
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e320cccba61a..94ff8364cdf0 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for ColdFire CPU based boards using a NS8390 Ethernet device.
*
@@ -5,9 +6,6 @@
*
* (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
*/
#include <linux/module.h>
@@ -405,15 +403,13 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- dev_err(&pdev->dev, "no IRQ specified?\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return -ENXIO;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
@@ -433,7 +429,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
@@ -445,17 +441,15 @@ static int mcf8390_probe(struct platform_device *pdev)
return 0;
}
-static int mcf8390_remove(struct platform_device *pdev)
+static void mcf8390_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct resource *mem;
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
- return 0;
}
static struct platform_driver mcf8390_drv = {
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 0a9118b8be0c..961019c32842 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
/*
Written 1992-94 by Donald Becker.
@@ -5,9 +6,6 @@
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
@@ -52,6 +50,7 @@ static const char version2[] =
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <net/Space.h>
#include <asm/io.h>
@@ -824,7 +823,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
return 0;
}
-static int ne_drv_remove(struct platform_device *pdev)
+static void ne_drv_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -843,7 +842,6 @@ static int ne_drv_remove(struct platform_device *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
}
- return 0;
}
/* Remove unused devices or all if true. */
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 6a0a2039600a..1a34da07c0db 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* A Linux device driver for PCI NE2000 clones.
*
* Authors and other copyright holders:
@@ -185,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count,
static const struct ethtool_ops ne2k_pci_ethtool_ops;
-
-/* There is no room in the standard 8390 structure for extra info we need,
- * so we build a meta/outer-wrapper structure..
- */
-struct ne2k_pci_card {
- struct net_device *dev;
- struct pci_dev *pci_dev;
-};
-
-
-
/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
* buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
* 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
@@ -730,18 +720,4 @@ static struct pci_driver ne2k_driver = {
.id_table = ne2k_pci_tbl,
.driver.pm = &ne2k_pci_pm_ops,
};
-
-
-static int __init ne2k_pci_init(void)
-{
- return pci_register_driver(&ne2k_driver);
-}
-
-
-static void __exit ne2k_pci_cleanup(void)
-{
- pci_unregister_driver(&ne2k_driver);
-}
-
-module_init(ne2k_pci_init);
-module_exit(ne2k_pci_cleanup);
+module_pci_driver(ne2k_driver);
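
module_pci_driver() expands to exactly the module_init()/module_exit() pair that the deleted ne2k-pci boilerplate spelled out by hand. A sketch, with hypothetical example_* names standing in for a real driver:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Generates init/exit functions that call pci_register_driver() and
     * pci_unregister_driver() on this struct.
     */
    static struct pci_driver example_driver = {
            .name           = "example",
            .id_table       = example_ids,          /* hypothetical table */
            .probe          = example_probe,        /* hypothetical probe */
            .remove         = example_remove,       /* hypothetical remove */
    };
    module_pci_driver(example_driver);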
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 0f07fe03da98..19f9c5db3f3b 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*======================================================================
A PCMCIA ethernet driver for NS8390-based cards
@@ -17,9 +18,7 @@
Written 1992,1993 by Donald Becker.
Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
+ Director, National Security Agency.
Donald Becker may be reached at becker@scyld.com
Based also on Keith Moore's changes to Don Becker's code, for IBM
@@ -948,7 +947,7 @@ static int pcnet_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
- del_timer_sync(&info->watchdog);
+ timer_delete_sync(&info->watchdog);
return 0;
} /* pcnet_close */
@@ -995,7 +994,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
@@ -1019,7 +1018,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct pcnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
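
The same two timer renames recur throughout these drivers: del_timer_sync() became timer_delete_sync(), and from_timer() became timer_container_of(), which maps a timer_list pointer back to the structure embedding it. A minimal sketch around a hypothetical struct:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct example_dev {
            struct timer_list watchdog;
    };

    static void example_watchdog(struct timer_list *t)
    {
            /* Recover the structure that embeds the expiring timer. */
            struct example_dev *ed = timer_container_of(ed, t, watchdog);

            mod_timer(&ed->watchdog, jiffies + HZ);   /* re-arm in 1s */
    }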
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 0890fa493f70..22ca804b2e95 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */
/*
This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards.
@@ -7,9 +8,6 @@
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
@@ -66,6 +64,7 @@ static const char version[] =
#include <linux/isapnp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -204,6 +203,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
int checksum = 0;
+ u8 macaddr[ETH_ALEN];
const char *model_name;
unsigned char eeprom_irq = 0;
static unsigned version_printed;
@@ -239,7 +239,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ macaddr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, macaddr);
netdev_info(dev, "%s at %#3x, %pM", model_name,
ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index bd89ca8a92df..6cc0e190aa79 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 1999 kaz Kojima
*/
@@ -299,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 263a942d81fa..ffd639477dfc 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* wd.c: A WD80x3 ethernet driver for linux. */
/*
Written 1993-94 by Donald Becker.
@@ -5,9 +6,6 @@
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
@@ -37,6 +35,7 @@ static const char version[] =
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <net/Space.h>
#include <asm/io.h>
@@ -168,6 +167,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int checksum = 0;
int ancient = 0; /* An old card without config registers. */
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ u8 addr[ETH_ALEN];
const char *model_name;
static unsigned version_printed;
struct ei_device *ei_local = netdev_priv(dev);
@@ -191,7 +191,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
netdev_info(dev, version);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ addr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, addr);
netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index e8b4fe813a08..c24dd4fe7a10 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver
*
@@ -9,12 +10,6 @@
*
* ---------------------------------------------------------------------------
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
- *
- * ---------------------------------------------------------------------------
- *
* The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS
* Ethernet Controllers.
*/
@@ -448,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4601b38f532a..4a1b368ca7e6 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,14 +15,12 @@ if ETHERNET
config MDIO
tristate
-config SUNGEM_PHY
- tristate
-
source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
@@ -35,15 +33,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +46,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -73,17 +70,18 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/engleder/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -120,14 +118,18 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/meta/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/mucse/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -139,10 +141,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -158,10 +160,22 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config OA_TC6
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
+ depends on SPI
+ select PHYLIB
+ help
+ This library implements the OPEN Alliance TC6 10BASE-T1x MAC-PHY
+ Serial Interface protocol, used to drive 10BASE-T1x MAC-PHYs.
+
+ For implementation details, refer to the documentation in
+ <file:Documentation/networking/oa-tc6-framework.rst>.
+
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -169,20 +183,24 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
+source "drivers/net/ethernet/spacemit/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/sunplus/Kconfig"
source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fdd8c6c17451..2e18df8ca8ec 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,7 +8,9 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
@@ -36,10 +38,12 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
@@ -56,10 +60,12 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_META) += meta/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
@@ -86,15 +92,20 @@ obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
+obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_OA_TC6) += oa_tc6.o
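
Every vendor directory added above is wired in with the same two lines, one per file. A sketch for a hypothetical vendor "example":

    # drivers/net/ethernet/Kconfig
    source "drivers/net/ethernet/example/Kconfig"

    # drivers/net/ethernet/Makefile
    obj-$(CONFIG_NET_VENDOR_EXAMPLE) += example/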
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1275,9 +1275,6 @@ static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
u32 data, tmp;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1305,9 +1302,6 @@ owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct owl_emac_priv *priv = bus->priv;
u32 data, tmp;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1331,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
@@ -1576,7 +1565,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
@@ -1588,15 +1577,13 @@ static int owl_emac_probe(struct platform_device *pdev)
return 0;
}
-static int owl_emac_remove(struct platform_device *pdev)
+static void owl_emac_remove(struct platform_device *pdev)
{
struct owl_emac_priv *priv = platform_get_drvdata(pdev);
netif_napi_del(&priv->napi);
phy_disconnect(priv->netdev->phydev);
cancel_work_sync(&priv->mac_reset_task);
-
- return 0;
}
static const struct of_device_id owl_emac_of_match[] = {
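
of_get_available_child_by_name() folds together the child lookup and the of_device_is_available() check that owl_emac_mdio_init() previously open-coded, and it still returns the node with a reference held. A minimal sketch, with example_find_mdio() as a hypothetical caller:

    #include <linux/errno.h>
    #include <linux/of.h>

    static int example_find_mdio(struct device_node *parent)
    {
            struct device_node *mdio;

            /* Only returns the child if its status is "okay" (or absent). */
            mdio = of_get_available_child_by_name(parent, "mdio");
            if (!mdio)
                    return -ENODEV;

            /* ... hang an MDIO bus off the node ... */

            of_node_put(mdio);      /* drop the reference taken above */
            return 0;
    }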
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c6982f7caf9b..e1b8794b14c9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -441,14 +441,6 @@ enum rx_desc_bits {
};
/* Completion queue entry. */
-struct short_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
-};
-struct basic_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
- __le16 vlanid;
- __le16 status2;
-};
struct csum_rx_done_desc {
__le32 status; /* Low 16 bits is length. */
__le16 csum; /* Partial checksum */
@@ -772,7 +764,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &ethtool_ops;
- netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+ netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
if (mtu)
dev->mtu = mtu;
@@ -1844,8 +1836,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
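
strscpy() replaces strlcpy() across these ethtool callbacks: it never reads the source beyond the destination size and returns the number of bytes copied, or -E2BIG on truncation, rather than strlcpy()'s would-be source length. A minimal sketch:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/string.h>

    static void example_copy(char *dst, size_t size)
    {
            /* Copies at most size - 1 bytes and always NUL-terminates. */
            if (strscpy(dst, "starfire", size) == -E2BIG)
                    pr_warn("driver name truncated\n");
    }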
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..760a9a60bc15
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ select PHYLIB
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..30f9d271e595
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <net/switchdev.h>
+
+#include <linux/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &priv->data[0];
+ t.len = read_len + header_len;
+
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
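
A worked example of the rounding rule, using the constants defined above: ALIGN(61, 4) is 64 and ALIGN(64, 4) is 64, so a 61-byte payload travels as a 64-byte SPI transfer, and the call fails with -EINVAL only once the rounded length plus the 3-byte read header would overflow the 2048-byte bounce buffer.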
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t = {0};
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return -EINVAL;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return -EINVAL;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+ memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN);
+
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &rxb->data[0];
+ t.len = header_len + round_len;
+
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = port_priv->priv->forwarding;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
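
A worked example of the RX size accounting, assuming the constants above: for a reported frame size of 70 bytes, the payload handed to the stack is 70 - 2 (ADIN1110_FRAME_HEADER_LEN) - 4 (ADIN1110_FEC_LEN) = 64 bytes, while the SPI transfer itself reads ALIGN(70, 4) = 72 bytes after the read header.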
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to the 64-byte minimum length; neither the MAC
+ * nor the PHY will otherwise add the required padding.
+ * The FEC is appended by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* record the egress port in the frame header */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* The ADIN1110_MDIO_TRDONE bit of the ADIN1110_MDIOACC
+ * register is set when the read is done.
+ * After the transaction is done, ADIN1110_MDIO_DATA
+ * bitfield of ADIN1110_MDIOACC register will contain
+ * the requested register value.
+ */
+ ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
+}
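
Both MDIO helpers above compose the same ADIN1110_MDIOACC command word from the bitfields defined earlier; only the opcode, and for writes the data field, differ. A sketch of the Clause 22 read command, with example_c22_read_cmd() as a hypothetical helper:

    #include <linux/bitfield.h>

    /* ST = 0x1 selects Clause 22 framing; DEVAD carries the C22 register
     * number here. Completion is signalled by ADIN1110_MDIO_TRDONE.
     */
    static u32 example_c22_read_cmd(int phy_id, int reg)
    {
            return FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD) |
                   FIELD_PREP(ADIN1110_MDIO_ST, 0x1) |
                   FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id) |
                   FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
    }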
+
+/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
+ * ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * By registering a new MDIO bus we let the PHY abstraction layer
+ * discover the encapsulated PHY and probe the ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, spi_get_chipselect(priv->spidev, 0));
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
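
A worked example of the TX-space bookkeeping above: ADIN1110_TX_SPACE reports free FIFO space in 16-bit half-words, so a register value of 1024 means 2 * 1024 = 2048 bytes are available, and the stopped queues are woken only when that count is non-zero after a successful read.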
+
+/* The ADIN1110 can filter up to 16 MAC addresses; mac_nr selects the slot used */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ eth_broadcast_addr(mask);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ eth_broadcast_addr(mask);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
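+ /* TX_SPACE is in 2-byte words; tx_space is tracked in bytes. */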
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* The PHY ID is also stored in the MAC registers;
+ * verify the SPI connection by reading it.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ struct gpio_desc *reset_gpio;
+ int ret;
+ u32 val;
+
+ reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (reset_gpio) {
+ /* The MISO pin is used for internal configuration, so nothing
+ * else may disturb the SDO line during reset.
+ */
+ spi_bus_lock(priv->spidev->controller);
+
+ gpiod_set_value(reset_gpio, 1);
+ fsleep(10000);
+ gpiod_set_value(reset_gpio, 0);
+
+ /* Need to wait 90 ms before interacting with
+ * the MAC after a HW reset.
+ */
+ fsleep(90000);
+
+ spi_bus_unlock(priv->spidev->controller);
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Forwarding is optimised when MAC runs in Cut Through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ if (!adin1110_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
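+ /* 01:80:C2:00:00:00 is the IEEE 802.1D Bridge Group (BPDU) address. */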
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ eth_broadcast_addr(mask);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/* The ADIN1110/2111 has no native STP support.
+ * Listen for bridge core state changes and allow
+ * either all frames or only BPDUs to pass.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
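+ /* Install the filter on the other port with a forward-to-other-port rule. */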
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(other_port, false, true);
+ eth_broadcast_addr(mask);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
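+ /* Address filters are split across two registers: upper 2 bytes, lower 4. */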
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
+static int adin1110_setup_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return 0;
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netns_immutable = true;
+
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+
+static int __init adin1110_driver_init(void)
+{
+ int ret;
+
+ ret = adin1110_setup_notifiers();
+ if (ret < 0)
+ return ret;
+
+ ret = spi_register_driver(&adin1110_driver);
+ if (ret < 0) {
+ adin1110_unregister_notifiers();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit adin1110_exit(void)
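+ /* Match any address with the multicast (I/G) bit set in the first octet. */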
+{
+ adin1110_unregister_notifiers();
+ spi_unregister_driver(&adin1110_driver);
+}
+module_init(adin1110_driver_init);
+module_exit(adin1110_exit);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..a593adc16c78 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -29,9 +29,9 @@
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ dev_kfree_skb(skb);
goto cleanup;
}
greth->rx_skbuff[i] = skb;
@@ -483,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
- goto out;
+ goto len_error;
}
/* Save skb pointer. */
@@ -574,6 +575,7 @@ frag_map_error:
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
dev_kfree_skb(skb);
out:
return err;
@@ -1112,9 +1114,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1509,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
@@ -1524,7 +1526,7 @@ error1:
return err;
}
-static int greth_of_remove(struct platform_device *of_dev)
+static void greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
struct greth_private *greth = netdev_priv(ndev);
@@ -1543,8 +1545,6 @@ static int greth_of_remove(struct platform_device *of_dev)
of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id greth_of_match[] = {
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index f4edc616388c..5c8217638dda 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
@@ -2413,11 +2413,13 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(tx_ring->tcb_ring);
}
+#define MAX_TX_DESC_PER_PKT 24
+
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
u32 i;
- struct tx_desc desc[24];
+ struct tx_desc desc[MAX_TX_DESC_PER_PKT];
u32 frag = 0;
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
@@ -2432,9 +2434,6 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* more than 5 fragments.
*/
- /* nr_frags should be no more than 18. */
- BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
-
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
for (i = 0; i < nr_frags; i++) {
@@ -2460,6 +2459,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb),
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2469,6 +2472,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb->data,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ return -ENOMEM;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2479,6 +2486,10 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
skb_headlen(skb) / 2,
skb_headlen(skb) / 2,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ dma_addr))
+ goto unmap_first_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2490,6 +2501,9 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
0,
desc[frag].len_vlan,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, dma_addr))
+ goto unmap_out;
+
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
frag++;
@@ -2579,6 +2593,27 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
&adapter->regs->global.watchdog_timer);
}
return 0;
+
+unmap_out:
+ // Unmap the body of the packet with map_page
+ while (--i) {
+ frag--;
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_page(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+unmap_first_out:
+ // Unmap the header with map_single
+ while (frag--) {
+ dma_addr = desc[frag].addr_lo;
+ dma_addr |= (u64)desc[frag].addr_hi << 32;
+ dma_unmap_single(&adapter->pdev->dev, dma_addr,
+ desc[frag].len_vlan, DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
@@ -2953,8 +2988,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3077,7 +3112,8 @@ err_out:
*/
static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
+ struct et131x_adapter *adapter = timer_container_of(adapter, t,
+ error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3640,7 +3676,7 @@ static int et131x_close(struct net_device *netdev)
free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
- return del_timer_sync(&adapter->error_timer);
+ return timer_delete_sync(&adapter->error_timer);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -3762,6 +3798,13 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
+ /* This driver does not support TSO, it is very unlikely
+ * this condition is true.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
+ if (skb_linearize(skb))
+ goto drop_err;
+ }
/* stop the queue if it's getting full */
if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
@@ -3846,7 +3889,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_disable_txrx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
et131x_adapter_memory_free(adapter);
@@ -3914,10 +3957,9 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- rc = -EIO;
goto err_release_res;
}
@@ -3964,7 +4006,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
@@ -3977,8 +4019,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
}
adapter->mii_bus->name = "et131x_eth_mii";
- snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
+ snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(adapter->pdev));
adapter->mii_bus->priv = netdev;
adapter->mii_bus->read = et131x_mdio_read;
adapter->mii_bus->write = et131x_mdio_write;
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..ad3ce501e7a5
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with Ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+ Enable Airoha flowtable statistics counters.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoCs' built-in Ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
new file mode 100644
index 000000000000..75893c90a0a1
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -0,0 +1,3180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
+#include <uapi/linux/ppp_defs.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+u32 airoha_rr(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= (airoha_rr(base, offset) & ~mask);
+ airoha_wr(base, offset, val);
+
+ return val;
+}
+
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
+{
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ int bank = irq_bank - &qdma->irq_banks[0];
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
+ return;
+
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
+
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
+
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
+}
+
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
+{
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
+{
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
+}
+
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, reg;
+
+ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+ : REG_FE_WAN_MAC_H;
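+ /* Program the address as a high half (first 3 bytes) and a low half (last 3 bytes). */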
+ val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+ airoha_fe_wr(eth, reg, val);
+
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
+}
+
+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+ u32 val)
+{
+ airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
+ FIELD_PREP(GDM_OCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
+ FIELD_PREP(GDM_MCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
+ FIELD_PREP(GDM_BCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+ FIELD_PREP(GDM_UCFQ_MASK, val));
+}
+
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
+
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
+ vip_port = XSI_PCIE0_VIP_PORT_MASK;
+ break;
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
+ vip_port = XSI_ETH_VIP_PORT_MASK;
+ break;
+ default:
+ return 0;
+ }
+
+ if (enable) {
+ airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+ } else {
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+ }
+
+ return 0;
+}
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+ int p;
+
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+ GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
+ GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);
+
+ airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
+ FIELD_PREP(CDM_VLAN_MASK, 0x8100));
+
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* BOOTP (0x43) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(8),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* BOOTP (0x44) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(9),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* ISAKMP */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(10),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* DHCPv6 */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(12),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* ETH->ETH_P_1905 (0x893a) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(20),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
+
+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue)
+{
+ u32 val;
+
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
+ val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+
+ return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
+
+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
+ FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+ u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+ return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
+
+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ u32 tmp, all_rsv, fq_limit;
+
+ airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+ /* modify all rsv */
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ all_rsv += (val - orig_val);
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+ FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
+
+ /* modify hthd */
+ tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
+ fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
+ tmp = fq_limit - all_rsv - 0x20;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_HTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
+
+ tmp = fq_limit - all_rsv - 0x100;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_MTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
+ tmp = (3 * tmp) >> 2;
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
+ PSE_SHARE_USED_LTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+ return 0;
+}
+
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
+ const u32 pse_port_num_queues[] = {
+ [FE_PSE_PORT_CDM1] = 6,
+ [FE_PSE_PORT_GDM1] = 6,
+ [FE_PSE_PORT_GDM2] = 32,
+ [FE_PSE_PORT_GDM3] = 6,
+ [FE_PSE_PORT_PPE1] = 4,
+ [FE_PSE_PORT_CDM2] = 6,
+ [FE_PSE_PORT_CDM3] = 8,
+ [FE_PSE_PORT_CDM4] = 10,
+ [FE_PSE_PORT_PPE2] = 4,
+ [FE_PSE_PORT_GDM4] = 2,
+ [FE_PSE_PORT_CDM5] = 2,
+ };
+ u32 all_rsv;
+ int q;
+
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* hw misses PPE2 oq rsv */
+ all_rsv += PSE_RSV_PAGES *
+ pse_port_num_queues[FE_PSE_PORT_PPE2];
+ }
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
+
+ /* CMD1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GMD1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* GMD2 */
+ for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
+ /* GMD3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+ }
+ /* CDM2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
+ /* CDM4 */
+ for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q, 0);
+ }
+ }
+ /* GMD4 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM5 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+ PSE_QUEUE_RSV_PAGES);
+}
+
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+ int err, j;
+ u32 val;
+
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+ false, eth, REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+
+ for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
+ MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC,
+ 5 * USEC_PER_MSEC, false, eth,
+ REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
+ /* CDM1_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+ /* CDM2_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+ airoha_fe_maccr_init(eth);
+
+ /* PSE IQ reserve */
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
+ FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
+ PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
+ FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
+ FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
+
+ /* enable FE copy engine for MC/KA/DPI */
+ airoha_fe_wr(eth, REG_FE_PCE_CFG,
+ PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
+ /* set vip queue selection to ring 1 */
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ /* set GDM4 source interface offset to 8 */
+ airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
+ GDM_SPORT_OFF2_MASK |
+ GDM_SPORT_OFF1_MASK |
+ GDM_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
+
+ /* set PSE Page as 128B */
+ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
+ FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
+ FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
+ FE_DMA_GLO_PG_SZ_MASK);
+ airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
+ FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
+ FE_RST_GDM4_MBI_ARB_MASK);
+ usleep_range(1000, 2000);
+
+ /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+ * connect other rings to PSE Port0 OQ-0
+ */
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
+
+ airoha_fe_vip_setup(eth);
+ airoha_fe_pse_ports_init(eth);
+
+ airoha_fe_set(eth, REG_GDM_MISC_CFG,
+ GDM2_RDM_ACK_WAIT_PREF_MASK |
+ GDM2_CHN_VLD_MODE_MASK);
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
+ FIELD_PREP(CDM_OAM_QSEL_MASK, 15));
+
+ /* init fragment and assemble Force Port */
+ /* NPU Core-3, NPU Bridge Channel-3 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
+ FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
+ FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
+ /* QDMA LAN, RX Ring-22 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
+ FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
+ FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
+
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
+
+ airoha_fe_crsn_qsel_init(eth);
+
+ airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
+
+ /* default aging mode for mbi unlock issue */
+ airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
+ MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
+ FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
+ FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
+
+ /* disable IFC by default */
+ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
+ /* enable 1:N vlan action, init vlan table */
+ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+ return airoha_fe_mc_vlan_clear(eth);
+}
+
+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+ struct airoha_qdma *qdma = q->qdma;
+ int qid = q - &qdma->q_rx[0];
+ int nframes = 0;
+
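+ /* Keep one descriptor slot free so a full ring can be told apart from an empty one. */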
+ while (q->queued < q->ndesc - 1) {
+ struct airoha_queue_entry *e = &q->entry[q->head];
+ struct airoha_qdma_desc *desc = &q->desc[q->head];
+ struct page *page;
+ int offset;
+ u32 val;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+ q->buf_size);
+ if (!page)
+ break;
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+ nframes++;
+
+ e->buf = page_address(page) + offset;
+ e->dma_addr = page_pool_get_dma_addr(page) + offset;
+ e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+ WRITE_ONCE(desc->msg3, 0);
+
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+ RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
+
+ return nframes;
+}
+
+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
+ struct airoha_qdma_desc *desc)
+{
+ u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+
+ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
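+ /* Map the QDMA rx descriptor source-port field to a GDM port index. */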
+ switch (sport) {
+ case 0x10 ... 0x14:
+ port = 0;
+ break;
+ case 0x2 ... 0x4:
+ port = sport - 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
+
+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
+ int done = 0;
+
+ while (done < budget) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+ struct page *page = virt_to_head_page(e->buf);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (!len || data_len < len)
+ goto free_frag;
+
+ p = airoha_qdma_get_gdm_port(eth, desc);
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
+ }
+
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* The PPE module requires untagged packets to work
+ * properly, and it provides the DSA port index via the
+ * DMA descriptor. Report the DSA tag to the DSA stack
+ * via the skb dst info.
+ */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
+ }
+
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
+ false);
+
+ done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
+ }
+ airoha_qdma_fill_rx_queue(q);
+
+ return done;
+}
+
+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+ int cur, done = 0;
+
+ do {
+ cur = airoha_qdma_rx_process(q, budget - done);
+ done += cur;
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
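+ /* Re-enable the rx-done interrupt only on banks wired to this queue. */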
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
+
+ return done;
+}
+
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
+{
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .pool_size = 256,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = qdma->eth->dev,
+ .napi = &q->napi,
+ };
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+ q->ndesc = ndesc;
+ q->qdma = qdma;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
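+ /* Ring threshold: one eighth of the ring size, clamped to [1, 32] descriptors. */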
+ thr = clamp(ndesc >> 3, 1, 32);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
+
+ airoha_qdma_fill_rx_queue(q);
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->qdma->eth;
+
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct page *page = virt_to_head_page(e->buf);
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+ page_pool_get_dma_dir(q->page_pool));
+ page_pool_put_full_page(q->page_pool, page, false);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+}
+
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!(RX_DONE_INT_MASK & BIT(i))) {
+			/* rx-queue not bound to an irq */

+ continue;
+ }
+
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
+ RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_tx_irq_queue *irq_q;
+ int id, done = 0, irq_queued;
+ struct airoha_qdma *qdma;
+ struct airoha_eth *eth;
+ u32 status, head;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
+ eth = qdma->eth;
+
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ head = head % irq_q->size;
+ irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
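+	/* Each completion entry packs the tx ring index and the descriptor
+	 * index; 0xff marks a slot that has already been consumed.
+	 */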
+ while (irq_queued > 0 && done < budget) {
+ u32 qid, val = irq_q->q[head];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue_entry *e;
+ struct airoha_queue *q;
+ u32 index, desc_ctrl;
+ struct sk_buff *skb;
+
+ if (val == 0xff)
+ break;
+
+ irq_q->q[head] = 0xff; /* mark as done */
+ head = (head + 1) % irq_q->size;
+ irq_queued--;
+ done++;
+
+ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+ if (qid >= ARRAY_SIZE(qdma->q_tx))
+ continue;
+
+ q = &qdma->q_tx[qid];
+ if (!q->ndesc)
+ continue;
+
+ index = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ if (index >= q->ndesc)
+ continue;
+
+ spin_lock_bh(&q->lock);
+
+ if (!q->queued)
+ goto unlock;
+
+ desc = &q->desc[index];
+ desc_ctrl = le32_to_cpu(desc->ctrl);
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ goto unlock;
+
+ e = &q->entry[index];
+ skb = e->skb;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_add_tail(&e->list, &q->tx_list);
+
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ q->queued--;
+
+ if (skb) {
+ u16 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(skb->dev, queue);
+ netdev_tx_completed_queue(txq, 1, skb->len);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
+
+ dev_kfree_skb_any(skb);
+ }
+unlock:
+ spin_unlock_bh(&q->lock);
+ }
+
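+	/* The IRQ_CLEAR_LEN field is 7 bits wide, so acknowledge the reaped
+	 * entries in chunks of 128 plus a final remainder.
+	 */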
+ if (done) {
+ int i, len = done >> 7;
+
+ for (i = 0; i < len; i++)
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, 0x80);
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+ }
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+ q->ndesc = size;
+ q->qdma = qdma;
+ q->free_thr = 1 + MAX_SKB_FRAGS;
+ INIT_LIST_HEAD(&q->tx_list);
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
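+	/* Mark every descriptor as completed and put all entries on the
+	 * free list so they can be claimed at xmit time.
+	 */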
+ for (i = 0; i < q->ndesc; i++) {
+ u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+
+ list_add_tail(&q->entry[i].list, &q->tx_list);
+ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+ }
+
+	/* Drop packets by default when the xmit ring is blocked */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));
+
+ return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
+{
+ int id = irq_q - &qdma->q_tx_irq[0];
+ struct airoha_eth *eth = qdma->eth;
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+ airoha_qdma_tx_napi_poll);
+ irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
+ &dma_addr, GFP_KERNEL);
+ if (!irq_q->q)
+ return -ENOMEM;
+
+ memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+ irq_q->qdma = qdma;
+
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ FIELD_PREP(TX_IRQ_THR_MASK, 1));
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
+ IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
+ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->qdma->eth;
+ int i;
+
+ spin_lock_bh(&q->lock);
+ for (i = 0; i < q->ndesc; i++) {
+ struct airoha_queue_entry *e = &q->entry[i];
+
+ if (!e->dma_addr)
+ continue;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(e->skb);
+ e->dma_addr = 0;
+ e->skb = NULL;
+ list_add_tail(&e->list, &q->tx_list);
+ q->queued--;
+ }
+ spin_unlock_bh(&q->lock);
+}
+
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+{
+ int size, index, num_desc = HW_DSCP_NUM;
+ struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
+ dma_addr_t dma_addr;
+ const char *name;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
+ return -ENOMEM;
+
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+
+		/* Use the reserved memory region for the hw forwarding
+		 * buffer queue if one is provided in the DTS
+		 */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+		rmem = of_reserved_mem_lookup(np);
+		of_node_put(np);
+		if (!rmem)
+			return -ENODEV;
+
+		dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
+
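+	/* Wait (up to 30ms) for the hw to complete the descriptor manager
+	 * init and clear the start bit.
+	 */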
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+ 30 * USEC_PER_MSEC, true, qdma,
+ REG_LMGR_INIT_CFG);
+}
+
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
+{
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+
+ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
+ PSE_BUF_ESTIMATE_EN_MASK);
+
+ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_EN_MASK |
+ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
+ /* 2047us x 31 = 63.457ms */
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_WINDOW_SZ_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_TIMESLICE_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
+
+ /* ratelimit init */
+ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ /* fast-tick 25us */
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
+ EGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_TRTCM_MODE_MASK);
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
+ INGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
+
+ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+}
+
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
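+	/* Each QoS channel owns a pair of counters: the even index counts
+	 * CPU-transmitted packets, the odd one hw-forwarded packets.
+	 */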
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+		airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
+
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ else
+ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ }
+
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+ GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
+ GLOBAL_CFG_MULTICAST_EN_MASK |
+ GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+ airoha_qdma_init_qos(qdma);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
+ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
+
+ return 0;
+}
+
+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+{
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
+ int i;
+
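+	/* Latch the pending interrupts, mask them with the set this bank
+	 * is watching and ack them in hw.
+	 */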
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= irq_bank->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
+ return IRQ_NONE;
+
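+	/* The first RX_DONE_HIGH_OFFSET rx queues report completion via
+	 * QDMA_INT_REG_IDX1, the remaining ones via QDMA_INT_REG_IDX2,
+	 * hence the shift when building the scheduling mask.
+	 */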
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
+
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
+
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+ napi_schedule(&qdma->q_tx_irq[i].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
+{
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
+
+ qdma->eth = eth;
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+
+ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+ if (IS_ERR(qdma->regs))
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_rx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_tx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_hfwd_queues(qdma);
+ if (err)
+ return err;
+
+ return airoha_qdma_hw_init(qdma);
+}
+
+static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
+{
+ int err, i;
+
+ /* disable xsi */
+ err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
+ if (err)
+ return err;
+
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+
+ msleep(20);
+ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+
+ msleep(20);
+ err = airoha_fe_init(eth);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+ if (err)
+ return err;
+ }
+
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
+}
+
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ netif_napi_del(&qdma->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ if (qdma->q_rx[i].page_pool)
+ page_pool_destroy(qdma->q_rx[i].page_pool);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ netif_napi_del(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
+}
+
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_enable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_enable(&qdma->q_rx[i].napi);
+ }
+}
+
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, i = 0;
+
+ spin_lock(&port->stats.lock);
+ u64_stats_update_begin(&port->stats.syncp);
+
+ /* TX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
+ port->stats.tx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
+ port->stats.tx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
+ port->stats.tx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
+ port->stats.tx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
+ port->stats.tx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
+ port->stats.tx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
+ port->stats.tx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
+ port->stats.tx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
+ port->stats.tx_len[i++] += val;
+
+ /* RX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
+ port->stats.rx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
+ port->stats.rx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
+ port->stats.rx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
+ port->stats.rx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
+ port->stats.rx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
+ port->stats.rx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
+ port->stats.rx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
+ port->stats.rx_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
+ port->stats.rx_crc_error += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
+ port->stats.rx_over_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
+ port->stats.rx_fragment += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
+ port->stats.rx_jabber += val;
+
+ i = 0;
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
+ port->stats.rx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
+ port->stats.rx_len[i++] += val;
+
+ /* reset mib counters */
+ airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
+ FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
+
+ u64_stats_update_end(&port->stats.syncp);
+ spin_unlock(&port->stats.lock);
+}
+
+static int airoha_dev_open(struct net_device *dev)
+{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+
+ netif_tx_start_all_queues(dev);
+ err = airoha_set_vip_for_gdm_port(port, true);
+ if (err)
+ return err;
+
+ if (netdev_uses_dsa(dev))
+ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+ else
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
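+
+	/* Multiple ports can share the same QDMA instance: enable the DMA
+	 * engines and keep a user count so that only the last
+	 * airoha_dev_stop() call disables them.
+	 */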
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+ atomic_inc(&qdma->users);
+
+ return 0;
+}
+
+static int airoha_dev_stop(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ int i, err;
+
+ netif_tx_disable(dev);
+ err = airoha_set_vip_for_gdm_port(port, false);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
+
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int err;
+
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
+
+ airoha_set_macaddr(port, dev->dev_addr);
+
+ return 0;
+}
+
+static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, pse_port, chan, nbq;
+ int src_port;
+
+ /* Forward the traffic to the proper GDM port */
+ pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : FE_PSE_PORT_GDM4;
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+
+	chan = port->id == AIROHA_GDM3_IDX ? (airoha_is_7581(eth) ? 4 : 3) : 0;
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) |
+ LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+ LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
+ nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
+ src_port = eth->soc->ops.get_src_port_id(port, nbq);
+ if (src_port < 0)
+ return src_port;
+
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, src_port));
+ val = src_port & SP_CPORT_DFT_MASK;
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
+ SP_CPORT_MASK(val),
+ FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));
+
+ if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+
+ return 0;
+}
+
+static int airoha_dev_init(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ u32 pse_port, fe_cpu_port;
+ u8 ppe_id;
+
+ airoha_set_macaddr(port, dev->dev_addr);
+
+ switch (port->id) {
+ case 3:
+ case 4:
+		/* If GDM2 is active, we can't enable loopback */
+ if (!eth->ports[1]) {
+ int err;
+
+			err = airoha_set_gdm2_loopback(port);
+ if (err)
+ return err;
+ }
+ fallthrough;
+ case 2:
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* For PPE2 always use secondary cpu port. */
+ fe_cpu_port = FE_PSE_PORT_CDM2;
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ }
+ fallthrough;
+ default: {
+ u8 qdma_id = qdma - &eth->qdma[0];
+
+ /* For PPE1 select cpu port according to the running QDMA. */
+ fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+ ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
+ airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
+ DFT_CPORT_MASK(port->id),
+ fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));
+
+ return 0;
+}
+
+static void airoha_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ storage->rx_packets = port->stats.rx_ok_pkts;
+ storage->tx_packets = port->stats.tx_ok_pkts;
+ storage->rx_bytes = port->stats.rx_ok_bytes;
+ storage->tx_bytes = port->stats.tx_ok_bytes;
+ storage->multicast = port->stats.rx_multicast;
+ storage->rx_errors = port->stats.rx_errors;
+ storage->rx_dropped = port->stats.rx_drops;
+ storage->tx_dropped = port->stats.tx_drops;
+ storage->rx_crc_errors = port->stats.rx_crc_error;
+ storage->rx_over_errors = port->stats.rx_over_errors;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+	/* For DSA devices, select the QoS channel according to the DSA
+	 * user port index; otherwise rely on the port id. Select the QoS
+	 * queue based on the skb priority.
+	 */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
+
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
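+	/* The upper byte of the MTK DSA tag encodes the xmit TPID: rewrite
+	 * tagged frames to a plain 802.1Q/802.1AD ethertype.
+	 */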
+ switch (xmit_tpid) {
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+		/* The PPE module requires untagged DSA packets to work
+		 * properly, so move the DSA tag into the DMA descriptor.
+		 */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
+static int airoha_get_fe_port(struct airoha_gdm_port *port)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+
+ switch (eth->soc->version) {
+ case 0x7583:
+ return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : port->id;
+ case 0x7581:
+ default:
+ return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
+ : port->id;
+ }
+}
+
+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+ u32 nr_frags, tag, msg0, msg1, len;
+ struct airoha_queue_entry *e;
+ struct netdev_queue *txq;
+ struct airoha_queue *q;
+ LIST_HEAD(tx_list);
+ void *data;
+ int i, qid;
+ u16 index;
+ u8 fport;
+
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
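+	/* TXMSG word 0 encodes the QoS channel/queue pair, the DSA special
+	 * tag and, for CHECKSUM_PARTIAL skbs, the TCP/UDP/IP checksum
+	 * offload flags.
+	 */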
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0))
+ goto error;
+
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
+
+ tcp_hdr(skb)->check = (__force __sum16)csum;
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
+ }
+ }
+
+ fport = airoha_get_fe_port(port);
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ q = &qdma->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+
+ spin_lock_bh(&q->lock);
+
+ txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
+ if (q->queued + nr_frags >= q->ndesc) {
+ /* not enough space in the queue */
+ netif_tx_stop_queue(txq);
+ spin_unlock_bh(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ len = skb_headlen(skb);
+ data = skb->data;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+
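+	/* Map the linear area and each fragment to consecutive descriptors;
+	 * e->skb is set only on the first entry so that tx completion frees
+	 * the skb exactly once.
+	 */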
+ for (i = 0; i < nr_frags; i++) {
+ struct airoha_qdma_desc *desc = &q->desc[index];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t addr;
+ u32 val;
+
+ addr = dma_map_single(dev->dev.parent, data, len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
+ goto error_unmap;
+
+ list_move_tail(&e->list, &tx_list);
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
+ if (i < nr_frags - 1)
+ val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
+
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+ }
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(txq, skb->len);
+
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
+ if (q->ndesc - q->queued < q->free_thr)
+ netif_tx_stop_queue(txq);
+
+ spin_unlock_bh(&q->lock);
+
+ return NETDEV_TX_OK;
+
+error_unmap:
+ while (!list_empty(&tx_list)) {
+ e = list_first_entry(&tx_list, struct airoha_queue_entry,
+ list);
+ dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_move_tail(&e->list, &q->tx_list);
+ }
+
+ spin_unlock_bh(&q->lock);
+error:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+
+ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+}
+
+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
+ stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
+ stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
+ stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->FramesReceivedOK = port->stats.rx_ok_pkts;
+ stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
+ stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {},
+};
+
+static void
+airoha_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_hw_stats *hw_stats = &port->stats;
+ unsigned int start;
+
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->tx_len) + 1);
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->rx_len) + 1);
+
+ *ranges = airoha_ethtool_rmon_ranges;
+ airoha_update_hw_stats(port);
+ do {
+ int i;
+
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->fragments = hw_stats->rx_fragment;
+ stats->jabbers = hw_stats->rx_jabber;
+ for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
+ i++) {
+ stats->hist[i] = hw_stats->rx_len[i];
+ stats->hist_tx[i] = hw_stats->tx_len[i];
+ }
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+	/* The EN7581 SoC supports a fixed QoS band priority scheme where
+	 * WRR queues have lower priority than SP ones,
+	 * e.g. WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+	 */
+ for (i = 0; i < nstrict; i++) {
+ if (p->priomap[p->bands - i - 1] != i)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < p->bands - nstrict; i++) {
+ if (p->priomap[i] != nstrict + i)
+ return -EINVAL;
+
+ w[i] = p->weights[nstrict + i];
+ }
+
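+	/* Pick the hw scheduling mode: pure WRR when no band is strict,
+	 * full SP when all bands are strict (mode already defaults to
+	 * TC_SCH_SP), and a mixed mode selected by the number of strict
+	 * bands otherwise.
+	 */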
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
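+	/* The hw counters are free-running: report the delta since the
+	 * previous read and cache the new snapshots.
+	 */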
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+		/* PRIO is the default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
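+	/* Compute the meter granularity: 1000000 / tick in packet mode,
+	 * 8000 / tick in byte mode (callers express rate_val in kbps or,
+	 * in packet mode, pps); the division remainder is programmed as a
+	 * fractional token rate.
+	 */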
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+			rate = rate << 3; /* kbps */
+ bucket_size = act->police.burst;
+ }
+ fallthrough;
+ }
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops airoha_netdev_ops = {
+ .ndo_init = airoha_dev_init,
+ .ndo_open = airoha_dev_open,
+ .ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
+ .ndo_select_queue = airoha_dev_select_queue,
+ .ndo_start_xmit = airoha_dev_xmit,
+ .ndo_get_stats64 = airoha_dev_get_stats64,
+ .ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
+};
+
+static const struct ethtool_ops airoha_ethtool_ops = {
+ .get_drvinfo = airoha_ethtool_get_drvinfo,
+ .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
+ .get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link = ethtool_op_get_link,
+};
+
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ if (eth->ports[i] == port)
+ return true;
+ }
+
+ return false;
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
+{
+ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
+ struct airoha_gdm_port *port;
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int err, p;
+ u32 id;
+
+ if (!id_ptr) {
+ dev_err(eth->dev, "missing gdm port id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(id_ptr);
+ p = id - 1;
+
+ if (!id || id > ARRAY_SIZE(eth->ports)) {
+ dev_err(eth->dev, "invalid gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->ports[p]) {
+ dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
+ if (!dev) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
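+	/* spread GDM ports evenly across the available QDMA blocks */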
+ qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
+ dev->netdev_ops = &airoha_netdev_ops;
+ dev->ethtool_ops = &airoha_ethtool_ops;
+ dev->max_mtu = AIROHA_MAX_MTU;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
+ dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
+ dev->dev.of_node = np;
+ dev->irq = qdma->irq_banks[0].irq;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
+ err = of_get_ethdev_address(np, dev);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ eth_hw_addr_random(dev);
+ dev_info(eth->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ port = netdev_priv(dev);
+ u64_stats_init(&port->stats.syncp);
+ spin_lock_init(&port->stats.lock);
+ port->qdma = qdma;
+ port->dev = dev;
+ port->id = id;
+ eth->ports[p] = port;
+
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
+
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
+}
+
+static int airoha_probe(struct platform_device *pdev)
+{
+ struct reset_control_bulk_data *xsi_rsts;
+ struct device_node *np;
+ struct airoha_eth *eth;
+ int i, err;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->soc = of_device_get_match_data(&pdev->dev);
+ if (!eth->soc)
+ return -EINVAL;
+
+ eth->dev = &pdev->dev;
+
+ err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(eth->dev, "failed configuring DMA mask\n");
+ return err;
+ }
+
+ eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(eth->fe_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+ eth->rsts[0].id = "fe";
+ eth->rsts[1].id = "pdma";
+ eth->rsts[2].id = "qdma";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->rsts),
+ eth->rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk reset lines\n");
+ return err;
+ }
+
+ xsi_rsts = devm_kcalloc(eth->dev,
+ eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
+ GFP_KERNEL);
+ if (!xsi_rsts)
+ return -ENOMEM;
+
+ eth->xsi_rsts = xsi_rsts;
+ for (i = 0; i < eth->soc->num_xsi_rsts; i++)
+ eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
+
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ eth->soc->num_xsi_rsts,
+ eth->xsi_rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
+ return err;
+ }
+
+ eth->napi_dev = alloc_netdev_dummy(0);
+ if (!eth->napi_dev)
+ return -ENOMEM;
+
+ /* Enable threaded NAPI by default */
+ eth->napi_dev->threaded = true;
+ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
+ platform_set_drvdata(pdev, eth);
+
+ err = airoha_hw_init(pdev, eth);
+ if (err)
+ goto error_hw_cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_start_napi(&eth->qdma[i]);
+
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_compatible(np, "airoha,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ err = airoha_alloc_gdm_port(eth, np, i++);
+ if (err) {
+ of_node_put(np);
+ goto error_napi_stop;
+ }
+ }
+
+ return 0;
+
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_ppe_deinit(eth);
+error_hw_cleanup:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
+ }
+ free_netdev(eth->napi_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void airoha_remove(struct platform_device *pdev)
+{
+ struct airoha_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+ airoha_hw_cleanup(&eth->qdma[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (!port)
+ continue;
+
+ airoha_dev_stop(port->dev);
+ unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
+ free_netdev(eth->napi_dev);
+
+ airoha_ppe_deinit(eth);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const char * const en7581_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "hsi-mac",
+ "xfp-mac",
+};
+
+static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7581 SoC supports PCIe serdes on GDM3 port */
+ if (nbq == 4)
+ return HSGMII_LAN_7581_PCIE0_SRCPORT;
+ if (nbq == 5)
+ return HSGMII_LAN_7581_PCIE1_SRCPORT;
+ break;
+ case 4:
+		/* 7581 SoC supports Ethernet and USB serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7581_ETH_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7581_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const an7583_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "xfp-mac",
+};
+
+static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+		/* 7583 SoC supports Ethernet serdes on GDM3 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_ETH_SRCPORT;
+ break;
+ case 4:
+ /* 7583 SoC supports PCIe and USB serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_PCIE_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7583_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct airoha_eth_soc_data en7581_soc_data = {
+ .version = 0x7581,
+ .xsi_rsts_names = en7581_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
+ .num_ppe = 2,
+ .ops = {
+ .get_src_port_id = airoha_en7581_get_src_port_id,
+ },
+};
+
+static const struct airoha_eth_soc_data an7583_soc_data = {
+ .version = 0x7583,
+ .xsi_rsts_names = an7583_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
+ .num_ppe = 1,
+ .ops = {
+ .get_src_port_id = airoha_an7583_get_src_port_id,
+ },
+};
+
+static const struct of_device_id of_airoha_match[] = {
+ { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
+ { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_match);
+
+static struct platform_driver airoha_driver = {
+ .probe = airoha_probe,
+ .remove = airoha_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_airoha_match,
+ },
+};
+module_platform_driver(airoha_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..fbbc58133364
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,673 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <linux/soc/airoha/airoha_offload.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
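+/* extra per-netdev TX rings backing the reserved HTB offload channels */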
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
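+/* TX completion IRQ queue 0 is twice as deep as the others */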
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
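+/* per-ring RX descriptor counts: ring 0 is the largest, rings 2/11/15 are
+ * mid-sized, all other rings are minimal
+ */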
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_STATS_NUM_ENTRIES (4 * 1024)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_ENTRY_SIZE 80
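+/* table sizes are programmed as log2(entries / 1024) */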
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_7581_PCIE1_SRCPORT,
+ HSGMII_LAN_7581_ETH_SRCPORT,
+ HSGMII_LAN_7581_USB_SRCPORT,
+};
+
+enum {
+ HSGMII_LAN_7583_ETH_SRCPORT = 0x16,
+ HSGMII_LAN_7583_PCIE_SRCPORT = 0x18,
+ HSGMII_LAN_7583_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum airoha_gdm_index {
+ AIROHA_GDM1_IDX = 1,
+ AIROHA_GDM2_IDX = 2,
+ AIROHA_GDM3_IDX = 3,
+ AIROHA_GDM4_IDX = 4,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
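+/* the token rate field is fixed-point: 18 integer bits plus a 6-bit fraction */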
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct {
+ struct list_head list;
+ struct sk_buff *skb;
+ };
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+
+ struct list_head tx_list;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
+#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
+#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+
+ u32 meter;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct airoha_flow_table_entry {
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
+
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
+ struct rhash_head node;
+ unsigned long cookie;
+
+ /* Must be last --ends in a flexible-array member. */
+ struct airoha_foe_entry data;
+};
+
+struct airoha_wdma_info {
+ u8 idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ atomic_t users;
+
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_ppe_dev dev;
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct rhashtable l2_flows;
+
+ struct hlist_head *foe_flow;
+ u16 *foe_check_time;
+
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth_soc_data {
+ u16 version;
+ const char * const *xsi_rsts_names;
+ int num_xsi_rsts;
+ int num_ppe;
+ struct {
+ int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
+ } ops;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ const struct airoha_eth_soc_data *soc;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data *xsi_rsts;
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+	/* The GDM1 port on the EN7581 SoC is connected to the LAN DSA
+	 * switch. GDM{2,3,4} can be used as WAN ports connected to an
+	 * external PHY module.
+	 */
+ return port->id == 1;
+}
+
+static inline bool airoha_is_7581(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7581;
+}
+
+static inline bool airoha_is_7583(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7583;
+}
+
+bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index);
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan);
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..68b7f9684dc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_eth.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
+#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_WLAN_BASE_ADDR 0x30d000
+
+#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
+#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
+#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
+
+#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
+#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
+#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
+#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
+
+#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
+#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
+#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
+#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+struct airoha_npu_fw {
+ const char *name;
+ int max_size;
+};
+
+struct airoha_npu_soc_data {
+ struct airoha_npu_fw fw_rv32;
+ struct airoha_npu_fw fw_data;
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ u8 max_packet;
+ u8 rsv[3];
+ u32 ppe_type;
+ u32 wan_mode;
+ u32 wan_sel;
+ } init_info;
+ struct {
+ u32 func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
+ };
+};
+
+struct wlan_mbox_data {
+ u32 ifindex:4;
+ u32 func_type:4;
+ u32 func_id;
+ DECLARE_FLEX_ARRAY(u8, d);
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
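+	/* post the message address and length, bump the sequence counter,
+	 * then ring the doorbell with the target function id and wait for
+	 * completion
+	 */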
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
+ const struct airoha_npu_fw *fw_info)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_info->name, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > fw_info->max_size) {
+ dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+ fw_info->name, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct resource *res)
+{
+ const struct airoha_npu_soc_data *soc;
+ void __iomem *addr;
+ int ret;
+
+ soc = of_device_get_match_data(dev);
+ if (!soc)
+ return -EINVAL;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ /* Load rv32 npu firmware */
+ ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ if (ret)
+ return ret;
+
+ /* Load data npu firmware */
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ &soc->fw_data);
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
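+	/* ack the interrupt and only dump core state if the watchdog is
+	 * still armed
+	 */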
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_ppe_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr,
+ u32 num_stats_entries)
+{
+ int err, size = num_stats_entries * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_SET;
+ wlan_data->func_id = func_id;
+ memcpy(wlan_data->d, data, data_len);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_GET;
+ wlan_data->func_id = func_id;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ if (!err)
+ memcpy(data, wlan_data->d, data_len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int
+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+ enum airoha_npu_wlan_set_cmd func_id)
+{
+ struct device *dev = npu->dev;
+ struct resource res;
+ int err;
+ u32 val;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
+ &res);
+ if (err)
+ return err;
+
+ val = res.start;
+ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
+ sizeof(val), GFP_KERNEL);
+}
+
+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
+{
+ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
+ u32 val = 0;
+ int err;
+
+ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
+ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+}
+
+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
+ bool xmit)
+{
+ if (xmit)
+ return REG_TX_BASE(qid + 2);
+
+ return REG_RX_BASE(qid);
+}
+
+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
+{
+ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
+}
+
+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
+{
+ u32 val;
+
+ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
+ return val;
+}
+
+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
+{
+ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
+{
+ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct airoha_npu_soc_data en7581_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_EN7581_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_EN7581_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct airoha_npu_soc_data an7583_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_AN7583_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_AN7583_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data },
+ { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_npu *npu;
+ struct resource res;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_init_stats = airoha_npu_ppe_stats_setup;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
+ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
+ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
+ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ err = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ /* wlan IRQ lines */
+ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
+ if (irq < 0)
+ return irq;
+
+ npu->irqs[i] = irq;
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, &res);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
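+	/* share the memory layout with the NPU: the reserved-memory area
+	 * past the rv32 image and the 256K SRAM size
+	 */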
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ res.start + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+ /* setting booting address */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), res.start);
+ usleep_range(1000, 2000);
+
+ /* enable NPU cores */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..0caabb0c3aa0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,1561 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params airoha_l2_flow_table_params = {
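+/* L2 flows are keyed on the 12-byte destination+source MAC pair at the
+ * head of struct airoha_foe_bridge
+ */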
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
+static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
+{
+ if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
+ return -EOPNOTSUPP;
+
+ if (airoha_is_7583(ppe->eth))
+ return -EOPNOTSUPP;
+
+ return PPE_STATS_NUM_ENTRIES;
+}
+
+static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
+{
+ int num_stats = airoha_ppe_get_num_stats_entries(ppe);
+
+ if (num_stats > 0) {
+ struct airoha_eth *eth = ppe->eth;
+
+ num_stats = num_stats * eth->soc->num_ppe;
+ }
+
+ return num_stats;
+}
+
+static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
+{
+ struct airoha_eth *eth = ppe->eth;
+
+ return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
+}
+
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
+}
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
+{
+ if (index >= eth->soc->num_ppe)
+ return false;
+
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
+ u32 sram_tb_size, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i, sram_num_stats_entries;
+
+ sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+ sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
+ if (sram_num_stats_entries > 0)
+ sram_ppe_num_data_entries -= sram_num_stats_entries;
+ sram_ppe_num_data_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);
+
+ for (i = 0; i < eth->soc->num_ppe; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_ppe_num_data_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
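+	/* only the 12 MAC address bytes at the start of the header may be
+	 * rewritten
+	 */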
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
+ struct airoha_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->idx = path->mtk_wdma.wdma_idx;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
+ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
+ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
+ if (dev) {
+ struct airoha_wdma_info info = {};
+
+ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
+ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
+ FE_PSE_PORT_CDM4);
+ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
+ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
+ info.idx) |
+ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
+ info.wcid);
+ } else {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0 || eth->ports[1])
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
+ : port->id;
+ else
+				/* uplink relies on GDM2 loopback */
+				pse_port = 2;
+
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ /* For downlink traffic consume SRAM memory for hw
+ * forwarding descriptors queue.
+ */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
+ dsa_port);
+
+ smac_id = port->id;
+ }
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
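+	/* unused port bytes are padded with the 0xa5 pattern the hw expects
+	 * (see struct airoha_foe_ipv4_tuple)
+	 */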
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
+ }
+
+ if (data->vlan.num) {
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ }
+
+ if (wlan_etype >= 0) {
+ l2->etype = wlan_etype;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return ppe_hash_mask;
+ }
+
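+	/* fold the three hash vectors into a single table index */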
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= ppe_hash_mask;
+
+ return hash;
+}
+
+static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
+ u32 hash, u32 *index)
+{
+ int ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return ppe_num_stats_entries;
+
+ *index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
+ : hash;
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ for (i = 0; i < ppe_num_stats_entries; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ int ppe_num_stats_entries;
+ u8 nbq;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ if (pse_port == FE_PSE_PORT_CDM4)
+ return;
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
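+/* Entries below the SRAM threshold live in hardware SRAM: trigger a
+ * read-back into the DRAM shadow copy before returning a pointer into
+ * it. Must be called with ppe_lock held.
+ */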
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ lockdep_assert_held(&ppe_lock);
+
+ if (hash < sram_num_entries) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ struct airoha_eth *eth = ppe->eth;
+ u32 val;
+ int i;
+
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
+ i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
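+/* Copy the DRAM shadow of the entry at @hash into PPE SRAM and poll
+ * for the hardware ACK bit.
+ */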
+static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ u32 *ptr = (u32 *)hwe, val;
+ int i;
+
+ for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);
+
+ wmb();
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);
+
+ return read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, ppe->eth,
+ REG_PPE_RAM_CTRL(ppe2));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
+
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
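+	/* Publish the entry body before ib1 (state/timestamp) validates it */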
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (!rx_wlan)
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
+ if (hash < sram_num_entries)
+ err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
+unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
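+/* Unlink the flow and invalidate its hardware entry. L2 subflow
+ * descriptors are freed here since they are allocated on demand.
+ */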
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
+ }
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
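+/* Bind a bridged subflow: clone the tuple captured in the unbound hw
+ * entry at @hash and overlay the forwarding info of the parent L2
+ * flow.
+ */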
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash, bool rx_wlan)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+		/* Setting smac_id to 0xf instructs the hw to keep the
+		 * original source mac address.
+		 */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ }
+
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
+
+ return 0;
+}
+
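+/* Rx-path hook: try to bind a pending software flow to the hw bucket
+ * reported by the PPE, falling back to an L2 parent-flow lookup for
+ * bridged traffic.
+ */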
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash, bool rx_wlan)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
+ struct airoha_foe_entry *hwe;
+ bool commit_done = false;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
+ index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
+ }
+
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
+ commit_done = true;
+ e->hash = hash;
+ }
+
+ if (commit_done)
+ goto unlock;
+
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+
+ hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
+ e->type = FLOW_TYPE_L4;
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
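+	/* The timestamp field wraps; unwrap a negative delta */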
+
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
+ e->hash = 0xffff;
+ goto unlock;
+ }
+
+ e->data.ib1 = hwe.ib1;
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ struct airoha_eth *eth = ppe->eth;
+ int ppe_num_stats_entries;
+ struct airoha_npu *npu;
+ u32 index;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
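+		/* ppe->foe_stats holds the upper 32 bits of each counter;
+		 * the NPU memory provides the lower 32 bits.
+		 */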
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(eth, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(eth, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(eth, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ struct airoha_foe_entry *hwe = ppe->foe;
+ int i, err = 0;
+
+ for (i = 0; i < sram_num_entries; i++) {
+ memset(&hwe[i], 0, sizeof(*hwe));
+ err = airoha_ppe_foe_commit_sram_entry(ppe, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
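+/* The NPU module may not be loaded yet: request it and retry once on
+ * failure.
+ */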
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ struct airoha_ppe *ppe = eth->ppe;
+ int err, ppe_num_stats_entries;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
+ ppe_num_stats_entries);
+ if (err)
+ goto error_npu_put;
+ }
+
+ airoha_ppe_hw_init(ppe);
+ airoha_ppe_foe_flow_stats_reset(ppe, npu);
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+ int err = 0;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(eth, type_data);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
+ u16 now, diff;
+
+ if (hash > ppe_hash_mask)
+ return;
+
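+	/* Rate-limit lookups to at most ~10 per second for each hash bucket */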
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
+}
+
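+/* Program the port MAC address into the PPE source-MAC update memory:
+ * bytes 2-5 go into word 0, bytes 0-1 into word 1.
+ */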
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+}
+
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_eth *eth;
+
+ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ goto error_pdev_put;
+ }
+
+ eth = platform_get_drvdata(pdev);
+ if (!eth)
+ goto error_module_put;
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ goto error_module_put;
+ }
+
+ return &eth->ppe->dev;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
+
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+
+ module_put(THIS_MODULE);
+ put_device(eth->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ int foe_size, err, ppe_num_stats_entries;
+ u32 ppe_num_entries;
+ struct airoha_ppe *ppe;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
+ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ ppe_num_entries * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
+	ppe->foe_check_time = devm_kzalloc(eth->dev,
+					   ppe_num_entries *
+					   sizeof(*ppe->foe_check_time),
+					   GFP_KERNEL);
+ if (!ppe->foe_check_time)
+ return -ENOMEM;
+
+ err = airoha_ppe_flush_sram_entries(ppe);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->ppe->l2_flows);
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..0112c41150bb
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ int i;
+
+ for (i = 0; i < ppe_num_entries; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..ed4e3407f4a0
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,923 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define CDM_BASE(_n) \
+ ((_n) == 2 ? CDM2_BASE : CDM1_BASE)
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n)
+#define CDM_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08)
+#define CDM_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2))
+#define CDM_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_PAD_EN_MASK BIT(28)
+#define GDM_DROP_CRC_ERR_MASK BIT(23)
+#define GDM_IP4_CKSUM_MASK BIT(22)
+#define GDM_TCP_CKSUM_MASK BIT(21)
+#define GDM_UDP_CKSUM_MASK BIT(20)
+#define GDM_STRIP_CRC_MASK BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK BIT(3)
+#define LBK_LEN_MODE_MASK BIT(2)
+#define LBK_CHAN_MODE_MASK BIT(1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c)
+#define GDM_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(15, 0)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_DFT_MASK GENMASK(2, 0)
+#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
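
Editor's note: REG_IRQ_STATUS packs the number of pending IRQ-queue entries and the head index into one word, so a handler would split it with FIELD_GET() from <linux/bitfield.h>. A minimal sketch under that assumption (the function is illustrative, not from the patch):

static void qdma_irq_status_example(void __iomem *base, int n)
{
	u32 status = readl(base + REG_IRQ_STATUS(n));
	u32 pending = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
	u32 head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);

	pr_debug("irq queue %d: %u entries pending, head %u\n",
		 n, pending, head);
}
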
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
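
Editor's note: the ring register macros encode two banks, with each ring owning a 0x20-byte window; rings 0-15 sit in the first bank and rings 16-31 in a second one. A worked example of the resulting addresses:

/* Worked example (illustrative):
 *
 *   REG_RX_RING_BASE(3)  == 0x0200 + (3 << 5)         == 0x0260
 *   REG_RX_RING_BASE(20) == 0x0e00 + ((20 - 16) << 5) == 0x0e80
 */
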
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
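
Editor's note: the three offsets after each *_TRTCM_CFG register suggest an indirect access scheme, with a selector/command word at +0x4 and a 64-bit data window at +0x8/+0xc. A hedged sketch of a parameter read (the RW-bit polarity, poll timeout, and helper name are assumptions, not taken from this header):

static int trtcm_param_read(void __iomem *base, u32 cfg_offs, u32 group,
			    u32 param, u32 *val)
{
	u32 sel = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
		  FIELD_PREP(TRTCM_METER_GROUP_MASK, group);
	u32 sts;
	int err;

	writel(sel, base + REG_TRTCM_CFG_PARAM(cfg_offs));
	/* wait for the block to latch the request (assumed semantics) */
	err = readl_poll_timeout(base + REG_TRTCM_CFG_PARAM(cfg_offs), sts,
				 sts & TRTCM_PARAM_RW_DONE_MASK, 10, 1000);
	if (err)
		return err;

	*val = readl(base + REG_TRTCM_DATA_LOW(cfg_offs));
	return 0;
}
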
+
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
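
Editor's note: REG_TXWRR_WEIGHT_CFG looks like a command mailbox: pack channel, queue and weight, set the command bit, then wait for TWRR_RW_CMD_DONE. A minimal sketch under those assumptions (helper name made up):

static int txwrr_set_weight(void __iomem *base, u32 chan, u32 queue, u16 weight)
{
	u32 sts, cmd = TWRR_RW_CMD_MASK |
		       FIELD_PREP(TWRR_CHAN_IDX_MASK, chan) |
		       FIELD_PREP(TWRR_QUEUE_IDX_MASK, queue) |
		       FIELD_PREP(TWRR_VALUE_MASK, weight);

	writel(cmd, base + REG_TXWRR_WEIGHT_CFG);
	/* poll until the block acknowledges the command (assumed semantics) */
	return readl_poll_timeout(base + REG_TXWRR_WEIGHT_CFG, sts,
				  sts & TWRR_RW_CMD_DONE, 10, 1000);
}
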
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
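
Editor's note: as an illustration of how the RX MSG1 flags might be consumed, here is a hedged sketch mapping the L4 bits onto skb checksum state (the exact semantics of L4_VALID/L4F are an assumption):

static void qdma_rx_csum_example(struct sk_buff *skb, u32 msg1)
{
	/* assumed: L4_VALID set and L4F (L4 failed) clear means the
	 * hardware verified the L4 checksum
	 */
	if ((msg1 & QDMA_ETH_RXMSG_L4_VALID_MASK) &&
	    !(msg1 & QDMA_ETH_RXMSG_L4F_MASK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
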
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
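
Editor's note: a hedged sketch of filling one TX descriptor with the masks above. The 0x7f "no meter" encoding comes from the inline comment on QDMA_ETH_TXMSG_METER_MASK; the helper name and parameters are illustrative:

static void qdma_fill_tx_desc_example(struct airoha_qdma_desc *desc,
				      dma_addr_t addr, u16 len,
				      u8 chan, u8 queue, u8 fport)
{
	desc->addr = cpu_to_le32(lower_32_bits(addr));
	desc->ctrl = cpu_to_le32(FIELD_PREP(QDMA_DESC_LEN_MASK, len));
	desc->msg0 = cpu_to_le32(FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, chan) |
				 FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, queue));
	/* 0x7f in the meter field means "no meter" per the comment above */
	desc->msg1 = cpu_to_le32(FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
				 FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f));
}
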
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 3add305d34b4..82071d0e5f7f 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -265,8 +265,6 @@
#define SLIC_NUM_STAT_DESC_ARRAYS 4
#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
-#define SLIC_NAPI_WEIGHT 64
-
#define SLIC_UPR_LSTAT 0
#define SLIC_UPR_CONFIG 1
@@ -290,13 +288,13 @@ do { \
u64_stats_update_end(&(st)->syncp); \
} while (0)
-#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
-{ \
- unsigned int start; \
+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
+{ \
+ unsigned int start; \
do { \
- start = u64_stats_fetch_begin_irq(&(st)->syncp); \
- newst = (st)->counter; \
- } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
+ start = u64_stats_fetch_begin(&(st)->syncp); \
+ newst = (st)->counter; \
+ } while (u64_stats_fetch_retry(&(st)->syncp, start)); \
}
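
Editor's note: the hunk above drops the _irq variants because the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() helpers now handle the 32-bit seqcount protection themselves. The general reader/writer pairing, as a sketch:

/* writer side (e.g. in the RX/TX path):
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	u64_stats_update_end(&stats->syncp);
 *
 * reader side (e.g. ndo_get_stats64), retried while a writer is active:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		packets = stats->rx_packets;
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */
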
struct slic_upr {
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 1fc9a1cd3ef8..f62851708d4f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1520,10 +1520,8 @@ static void slic_get_ethtool_stats(struct net_device *dev,
static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS) {
+ if (stringset == ETH_SS_STATS)
memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
- data += sizeof(slic_stats_strings);
- }
}
static void slic_get_drvinfo(struct net_device *dev,
@@ -1531,8 +1529,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1680,17 +1678,15 @@ static int slic_init(struct slic_device *sdev)
slic_card_reset(sdev);
err = slic_load_firmware(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware\n");
/* we need the shared memory to read EEPROM so set it up temporarily */
err = slic_init_shmem(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to init shared memory\n");
err = slic_read_eeprom(sdev);
if (err) {
@@ -1743,10 +1739,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable PCI device\n");
pci_set_master(pdev);
pci_try_set_mwi(pdev);
@@ -1803,7 +1798,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 800ee022388f..2f516b950f4e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/dmaengine.h>
#include "sun4i-emac.h"
@@ -76,7 +77,6 @@ struct emac_board_info {
void __iomem *membase;
u32 msg_enable;
struct net_device *ndev;
- struct sk_buff *skb_last;
u16 tx_fifo_stat;
int emacrx_completed_flag;
@@ -87,6 +87,16 @@ struct emac_board_info {
unsigned int duplex;
phy_interface_t phy_interface;
+ struct dma_chan *rx_chan;
+ phys_addr_t emac_rx_fifo;
+};
+
+struct emac_dma_req {
+ struct emac_board_info *db;
+ struct dma_async_tx_descriptor *desc;
+ struct sk_buff *skb;
+ dma_addr_t rxbuf;
+ int count;
};
static void emac_update_speed(struct net_device *dev)
@@ -96,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depend on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -206,12 +216,123 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
+static struct emac_dma_req *
+emac_alloc_dma_req(struct emac_board_info *db,
+ struct dma_async_tx_descriptor *desc, struct sk_buff *skb,
+ dma_addr_t rxbuf, int count)
+{
+ struct emac_dma_req *req;
+
+ req = kzalloc(sizeof(struct emac_dma_req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ req->db = db;
+ req->desc = desc;
+ req->skb = skb;
+ req->rxbuf = rxbuf;
+ req->count = count;
+ return req;
+}
+
+static void emac_free_dma_req(struct emac_dma_req *req)
+{
+ kfree(req);
+}
+
+static void emac_dma_done_callback(void *arg)
+{
+ struct emac_dma_req *req = arg;
+ struct emac_board_info *db = req->db;
+ struct sk_buff *skb = req->skb;
+ struct net_device *dev = db->ndev;
+ int rxlen = req->count;
+ u32 reg_val;
+
+ dma_unmap_single(db->dev, req->rxbuf, rxlen, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Pass to upper layer */
+ netif_rx(skb);
+ dev->stats.rx_bytes += rxlen;
+ dev->stats.rx_packets++;
+
+ /* re-enable CPU receive */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+
+ /* re-enable interrupts */
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= EMAC_INT_CTL_RX_EN;
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+ db->emacrx_completed_flag = 1;
+ emac_free_dma_req(req);
+}
+
+static int emac_dma_inblk_32bit(struct emac_board_info *db,
+ struct sk_buff *skb, void *rdptr, int count)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t rxbuf;
+ struct emac_dma_req *req;
+ int ret = 0;
+
+ rxbuf = dma_map_single(db->dev, rdptr, count, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(db->dev, rxbuf);
+ if (ret) {
+ dev_err(db->dev, "dma mapping error.\n");
+ return ret;
+ }
+
+ desc = dmaengine_prep_slave_single(db->rx_chan, rxbuf, count,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(db->dev, "prepare slave single failed\n");
+ ret = -ENOMEM;
+ goto prepare_err;
+ }
+
+ req = emac_alloc_dma_req(db, desc, skb, rxbuf, count);
+ if (!req) {
+ dev_err(db->dev, "alloc emac dma req error.\n");
+ ret = -ENOMEM;
+ goto alloc_req_err;
+ }
+
+ desc->callback_param = req;
+ desc->callback = emac_dma_done_callback;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(db->dev, "dma submit error.\n");
+ goto submit_err;
+ }
+
+ dma_async_issue_pending(db->rx_chan);
+ return ret;
+
+submit_err:
+ emac_free_dma_req(req);
+
+alloc_req_err:
+ dmaengine_desc_free(desc);
+
+prepare_err:
+ dma_unmap_single(db->dev, rxbuf, count, DMA_FROM_DEVICE);
+ return ret;
+}
+
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
@@ -308,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initial EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -320,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -385,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrupts */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -499,7 +620,6 @@ static void emac_rx(struct net_device *dev)
struct sk_buff *skb;
u8 *rdptr;
bool good_packet;
- static int rxlen_last;
unsigned int reg_val;
u32 rxhdr, rxstatus, rxcount, rxlen;
@@ -514,26 +634,12 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RXCount: %x\n", rxcount);
- if ((db->skb_last != NULL) && (rxlen_last > 0)) {
- dev->stats.rx_bytes += rxlen_last;
-
- /* Pass to upper layer */
- db->skb_last->protocol = eth_type_trans(db->skb_last,
- dev);
- netif_rx(db->skb_last);
- dev->stats.rx_packets++;
- db->skb_last = NULL;
- rxlen_last = 0;
-
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val &= ~EMAC_RX_CTL_DMA_EN;
- writel(reg_val, db->membase + EMAC_RX_CTL_REG);
- }
-
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -565,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -623,6 +731,19 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RxLen %x\n", rxlen);
+ if (rxlen >= dev->mtu && db->rx_chan) {
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val |= EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ if (!emac_dma_inblk_32bit(db, skb, rdptr, rxlen))
+ break;
+
+ /* re-enable CPU receive, then try to receive via emac_inblk_32bit */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ }
+
emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
rdptr, rxlen);
dev->stats.rx_bytes += rxlen;
@@ -666,18 +787,23 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+ } else {
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
+
spin_unlock(&db->lock);
return IRQ_HANDLED;
@@ -782,6 +908,58 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static int emac_configure_dma(struct emac_board_info *db)
+{
+ struct platform_device *pdev = db->pdev;
+ struct net_device *ndev = db->ndev;
+ struct dma_slave_config conf = {};
+ struct resource *regs;
+ int err = 0;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ netdev_err(ndev, "get io resource from device failed.\n");
+ err = -ENOMEM;
+ goto out_clear_chan;
+ }
+
+ netdev_info(ndev, "get io resource from device: %pa, size = %u\n",
+ &regs->start, (unsigned int)resource_size(regs));
+ db->emac_rx_fifo = regs->start + EMAC_RX_IO_DATA_REG;
+
+ db->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(db->rx_chan)) {
+ netdev_err(ndev,
+ "failed to request dma channel. dma is disabled\n");
+ err = PTR_ERR(db->rx_chan);
+ goto out_clear_chan;
+ }
+
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr = db->emac_rx_fifo;
+ conf.dst_maxburst = 4;
+ conf.src_maxburst = 4;
+ conf.device_fc = false;
+
+ err = dmaengine_slave_config(db->rx_chan, &conf);
+ if (err) {
+ netdev_err(ndev, "config dma slave failed\n");
+ err = -EINVAL;
+ goto out_slave_configure_err;
+ }
+
+ return err;
+
+out_slave_configure_err:
+ dma_release_channel(db->rx_chan);
+
+out_clear_chan:
+ db->rx_chan = NULL;
+ return err;
+}
+
/* Search EMAC board, allocate space and register it
*/
static int emac_probe(struct platform_device *pdev)
@@ -824,6 +1002,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ if (emac_configure_dma(db))
+ netdev_info(ndev, "configure dma failed. disable dma.\n");
+
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
@@ -891,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
@@ -901,11 +1083,16 @@ out:
return ret;
}
-static int emac_remove(struct platform_device *pdev)
+static void emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
+ if (db->rx_chan) {
+ dmaengine_terminate_all(db->rx_chan);
+ dma_release_channel(db->rx_chan);
+ }
+
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
@@ -914,7 +1101,6 @@ static int emac_remove(struct platform_device *pdev)
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
static int emac_suspend(struct platform_device *dev, pm_message_t state)
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 732da15a3827..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -589,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
}
ap->name = dev->name;
- if (ap->pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
@@ -1130,11 +1129,7 @@ static int ace_init(struct net_device *dev)
/*
* Configure DMA attributes.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- ap->pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- ap->pci_using_dac = 0;
- } else {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ecode = -ENODEV;
goto init_error;
}
@@ -1565,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
{
- struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct ace_private *ap = from_work(ap, work, ace_bh_work);
struct net_device *dev = ap->ndev;
int cur_size;
@@ -1600,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
- ap->tasklet_pending = 0;
+ ap->bh_work_pending = 0;
}
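
Editor's note: the pattern used throughout this conversion is a work item queued on system_bh_wq, which keeps tasklet-like softirq execution while using the ordinary workqueue API; from_work() is the container_of helper that replaces from_tasklet(). Summarized as a sketch (names mirror the driver but are generic here):

/* Minimal sketch of the tasklet -> BH workqueue migration:
 *
 *	INIT_WORK(&priv->bh_work, my_bh_work);     (was tasklet_setup)
 *	queue_work(system_bh_wq, &priv->bh_work);  (was tasklet_schedule)
 *	cancel_work_sync(&priv->bh_work);          (was tasklet_kill)
 *
 * and in the handler, from_work() replaces from_tasklet():
 *
 *	struct my_priv *priv = from_work(priv, work, bh_work);
 */
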
@@ -1622,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
@@ -2165,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
*/
if (netif_running(dev)) {
int cur_size;
- int run_tasklet = 0;
+ int run_bh_work = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
@@ -2177,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
@@ -2193,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
@@ -2210,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
- if (run_tasklet && !ap->tasklet_pending) {
- ap->tasklet_pending = 1;
- tasklet_schedule(&ap->ace_tasklet);
+ if (run_bh_work && !ap->bh_work_pending) {
+ ap->bh_work_pending = 1;
+ queue_work(system_bh_wq, &ap->ace_bh_work);
}
}
@@ -2272,7 +2267,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+ INIT_WORK(&ap->ace_bh_work, ace_bh_work);
return 0;
}
@@ -2306,7 +2301,7 @@ static int ace_close(struct net_device *dev)
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
- tasklet_kill(&ap->ace_tasklet);
+ cancel_work_sync(&ap->ace_bh_work);
/*
* Make sure one CPU is not processing packets while
@@ -2440,7 +2435,7 @@ restart:
} else {
dma_addr_t mapping;
u32 vlan_tag = 0;
- int i, len = 0;
+ int i;
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = (skb_headlen(skb) << 16);
@@ -2459,7 +2454,6 @@ restart:
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
@@ -2545,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
struct ace_regs __iomem *regs = ap->regs;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
@@ -2696,12 +2690,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 265fa601a258..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
/*
* Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
- int tasklet_pending, jumbo;
- struct tasklet_struct ace_tasklet;
+ int bh_work_pending, jumbo;
+ struct work_struct ace_bh_work;
struct event *evt_ring;
@@ -692,7 +692,6 @@ struct ace_private
__attribute__ ((aligned (SMP_CACHE_BYTES)));
u32 last_tx, last_std_rx, last_mini_rx;
#endif
- int pci_using_dac;
u8 firmware_major;
u8 firmware_minor;
u8 firmware_fix;
@@ -777,7 +776,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..4ef819a9a1ad 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -2,7 +2,12 @@
config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
+ depends on HAS_IOMEM
select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
+ select MDIO_REGMAP
+ select REGMAP_MMIO
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index db97170da8c7..7f247ccbe6ba 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..e5a56bb989da 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -411,9 +401,6 @@ struct altera_tse_private {
/* MAC address space */
struct altera_tse_mac __iomem *mac_dev;
- /* TSE Revision */
- u32 revision;
-
/* mSGDMA Rx Dispatcher address space */
void __iomem *rx_dma_csr;
void __iomem *rx_dma_desc;
@@ -423,6 +410,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -479,7 +469,11 @@ struct altera_tse_private {
/* ethtool msglvl option */
u32 msg_enable;
- struct altera_dmaops *dmaops;
+ const struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index d75d95a97dd9..ca55c5fd11df 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -27,13 +27,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
+#include <linux/mdio/mdio-regmap.h>
#include <linux/netdevice.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>
@@ -72,41 +75,18 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
#define TXQUEUESTOP_THRESHHOLD 2
-static const struct of_device_id altera_tse_ids[];
-
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -163,7 +143,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +161,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +171,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
@@ -232,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -354,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -367,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
unsigned int next_entry;
+ unsigned int count = 0;
struct sk_buff *skb;
- unsigned int entry = priv->rx_cons % priv->rx_ring_size;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -390,7 +375,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
@@ -444,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -493,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -557,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int nopaged_len = skb_headlen(skb);
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -615,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -764,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -999,7 +788,7 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
@@ -1008,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1044,7 +833,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
@@ -1064,7 +853,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
@@ -1083,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1163,18 +892,7 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "device MAC address %pM\n",
dev->dev_addr);
- if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
- netdev_warn(dev, "TSE revision %x\n", priv->revision);
-
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1232,8 +950,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1261,13 +983,11 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1313,11 +1033,73 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1346,13 +1128,18 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ struct regmap_config pcs_regmap_cfg;
+ struct altera_tse_private *priv;
+ struct mdio_regmap_config mrc;
struct resource *control_port;
+ struct regmap *pcs_regmap;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct mii_bus *pcs_bus;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int ret = -ENODEV;
+ u32 revision;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1361,17 +1148,14 @@ static int altera_tse_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(ndev, &pdev->dev);
+ platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
priv->device = &pdev->dev;
priv->dev = ndev;
priv->msg_enable = netif_msg_init(debug, default_msg_level);
- of_id = of_match_device(altera_tse_ids, &pdev->dev);
-
- if (of_id)
- priv->dmaops = (struct altera_dmaops *)of_id->data;
-
+ priv->dmaops = device_get_match_data(&pdev->dev);
if (priv->dmaops &&
priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
@@ -1430,16 +1214,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;
} else {
+ ret = -ENODEV;
goto err_free_netdev;
}
- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
- else
+ } else {
+ ret = -EIO;
goto err_free_netdev;
+ }
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
@@ -1460,6 +1247,40 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+ memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
+ memset(&mrc, 0, sizeof(mrc));
+ /* SGMII PCS address space. The location can vary depending on how the
+ * IP is integrated. We may have a dedicated resource at a specific
+ * address, but if not, we fall back to the mdio_phy0 registers in the
+ * MAC's address space
+ */
+ ret = request_and_map(pdev, "pcs", &pcs_res, &priv->pcs_base);
+ if (ret) {
+ /* If we can't find a dedicated resource for the PCS, fall back
+ * to the internal PCS, which has a different address stride
+ */
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_regmap_cfg.reg_bits = 32;
+ /* Values are MDIO-like, 16 bits wide */
+ pcs_regmap_cfg.val_bits = 16;
+ pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(2);
+ } else {
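+ /* Dedicated PCS resource: 16-bit registers, 2 bytes apart */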
+ pcs_regmap_cfg.reg_bits = 16;
+ pcs_regmap_cfg.val_bits = 16;
+ pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
+ }
+
+ /* Create a regmap for the PCS so that it can be used by the PCS driver */
+ pcs_regmap = devm_regmap_init_mmio(&pdev->dev, priv->pcs_base,
+ &pcs_regmap_cfg);
+ if (IS_ERR(pcs_regmap)) {
+ ret = PTR_ERR(pcs_regmap);
+ goto err_free_netdev;
+ }
+ mrc.regmap = pcs_regmap;
+ mrc.parent = &pdev->dev;
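+ /* The PCS is the only device on this bus, at MDIO address 0 */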
+ mrc.valid_addr = 0x0;
+ mrc.autoscan = false;
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
@@ -1559,40 +1380,73 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
- netif_carrier_off(ndev);
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(&pdev->dev));
+ pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
+ if (IS_ERR(pcs_bus)) {
+ ret = PTR_ERR(pcs_bus);
+ goto err_init_pcs;
+ }
+
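+ /* The TSE PCS register layout matches the Lynx PCS, so reuse that
+ * driver here.
+ */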
+ priv->pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
+ if (IS_ERR(priv->pcs)) {
+ ret = PTR_ERR(priv->pcs);
+ goto err_init_pcs;
+ }
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
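+ /* MII/GMII/RGMII are driven by the MAC directly; SGMII and
+ * 1000BASE-X additionally go through the PCS selected above.
+ */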
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
+ goto err_init_phylink;
+ }
+
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");
goto err_register_netdev;
}
- platform_set_drvdata(pdev, ndev);
+ revision = ioread32(&priv->mac_dev->megacore_revision);
- priv->revision = ioread32(&priv->mac_dev->megacore_revision);
+ if (revision < 0xd00 || revision > 0xe00)
+ netdev_warn(ndev, "TSE revision %x\n", revision);
if (netif_msg_probe(priv))
dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
- (priv->revision >> 8) & 0xff,
- priv->revision & 0xff,
- (unsigned long) control_port->start, priv->rx_irq,
+ (revision >> 8) & 0xff, revision & 0xff,
+ (unsigned long)control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
- goto err_init_phy;
- }
return 0;
-err_init_phy:
- unregister_netdev(ndev);
err_register_netdev:
+ phylink_destroy(priv->phylink);
+err_init_phylink:
+ lynx_pcs_destroy(priv->pcs);
+err_init_pcs:
netif_napi_del(&priv->napi);
altera_tse_mdio_destroy(ndev);
err_free_netdev:
@@ -1602,24 +1456,18 @@ err_free_netdev:
/* Remove Altera TSE MAC device
*/
-static int altera_tse_remove(struct platform_device *pdev)
+static void altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
- free_netdev(ndev);
+ phylink_destroy(priv->phylink);
+ lynx_pcs_destroy(priv->pcs);
- return 0;
+ free_netdev(ndev);
}
static const struct altera_dmaops altera_dtype_sgdma = {
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index b7d772f2dcbb..3c2e32fb7389 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -3,11 +3,12 @@
* Copyright (C) 2014 Altera Corporation. All rights reserved
*/
-#include <linux/kernel.h>
-
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index f1f752a8f7bb..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index f5ec35fa4c63..898ecd96b96a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -7,6 +7,21 @@
#define ENA_ADMIN_RSS_KEY_PARTS 10
+#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
+#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
+
+ /* Customer metrics - bit positions correspond to
+ * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ */
+enum ena_admin_customer_metrics_id {
+ ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0,
+ ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1,
+ ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3,
+ ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5,
+};
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -45,9 +60,18 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
+/* device capabilities */
+enum ena_admin_aq_caps_id {
+ ENA_ADMIN_ENI_STATS = 0,
+ /* ENA SRD customer metrics */
+ ENA_ADMIN_ENA_SRD_INFO = 1,
+ ENA_ADMIN_CUSTOMER_METRICS = 2,
+};
+
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
@@ -94,6 +118,9 @@ enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
/* extra HW stats for specific network interface */
ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
+ /* extra HW stats for ENA SRD */
+ ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3,
+ ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4,
};
enum ena_admin_get_stats_scope {
@@ -101,6 +128,24 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
+/* ENA SRD configuration for ENI */
+enum ena_admin_ena_srd_flags {
+ /* Feature enabled */
+ ENA_ADMIN_ENA_SRD_ENABLED = BIT(0),
+ /* UDP support enabled */
+ ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1),
+ /* Bypass Rx UDP ordering */
+ ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
+};
+
struct ena_admin_aq_common_desc {
/* 11:0 : command_id
* 15:12 : reserved12
@@ -358,6 +403,9 @@ struct ena_admin_aq_get_stats_cmd {
* stats of other device
*/
u16 device_id;
+
+ /* a bitmap representing the requested metric values */
+ u64 requested_metrics;
};
/* Basic Statistics Command. */
@@ -414,6 +462,40 @@ struct ena_admin_eni_stats {
u64 linklocal_allowance_exceeded;
};
+struct ena_admin_ena_srd_stats {
+ /* Number of packets transmitted over ENA SRD */
+ u64 ena_srd_tx_pkts;
+
+ /* Number of packets that were transmitted, or could have been
+ * transmitted, over ENA SRD
+ */
+ u64 ena_srd_eligible_tx_pkts;
+
+ /* Number of packets received over ENA SRD */
+ u64 ena_srd_rx_pkts;
+
+ /* Percentage of the ENA SRD resources that are in use */
+ u64 ena_srd_resource_utilization;
+};
+
+/* ENA SRD Statistics Command */
+struct ena_admin_ena_srd_info {
+ /* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
+ * details
+ */
+ u64 flags;
+
+ struct ena_admin_ena_srd_stats ena_srd_stats;
+};
+
+/* Customer Metrics Command. */
+struct ena_admin_customer_metrics {
+ /* A bitmap representing the reported customer metrics according to
+ * the order they are reported
+ */
+ u64 reported_metrics;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -423,6 +505,10 @@ struct ena_admin_acq_get_stats_resp {
struct ena_admin_basic_stats basic_stats;
struct ena_admin_eni_stats eni_stats;
+
+ struct ena_admin_ena_srd_info ena_srd_info;
+
+ struct ena_admin_customer_metrics customer_metrics;
} u;
};
@@ -455,7 +541,10 @@ struct ena_admin_device_attr_feature_desc {
*/
u32 supported_features;
- u32 reserved3;
+ /* bitmap of ena_admin_aq_caps_id, which represents device
+ * capabilities.
+ */
+ u32 capabilities;
/* Indicates how many bits are used for physical address access. */
u32 phys_addr_width;
@@ -861,7 +950,11 @@ struct ena_admin_host_info {
* 2 : interrupt_moderation
* 3 : rx_buf_mirroring
* 4 : rss_configurable_function_key
- * 31:5 : reserved
+ * 5 : reserved
+ * 6 : rx_page_reuse
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -893,7 +986,7 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry;
};
-/* When hint value is 0, driver should use it's own predefined value */
+/* When hint value is 0, driver should use its own predefined value */
struct ena_admin_ena_hw_hints {
/* value in ms */
u16 mmio_read_timeout;
@@ -941,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for a valid PHC retrieval; exceeding this threshold
+ * fails the get-time request and blocks PHC requests for
+ * block_timeout_usec, used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC request blocking period; blocking starts when a PHC request
+ * expires, in order to prevent floods on a busy device,
+ * used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -970,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1003,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1080,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request ID, received from the DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1176,6 +1328,10 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3)
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
+#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
+#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ab413fc1f68e..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -35,10 +35,18 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
+#define ENA_MAX_BACKOFF_DELAY_EXP 16U
+
#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -88,8 +96,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &sq->dma_addr, GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -111,8 +118,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &cq->dma_addr, GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -134,8 +140,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
- &aenq->dma_addr, GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -153,14 +158,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq_caps = 0;
aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |=
+ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- netdev_err(ena_dev->net_device,
- "AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -187,14 +191,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
}
if (unlikely(!admin_queue->comp_ctx)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
return NULL;
}
@@ -224,8 +226,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- netdev_dbg(admin_queue->ena_dev->net_device,
- "Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -272,8 +273,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
struct ena_comp_ctx *comp_ctx;
u16 i;
- admin_queue->comp_ctx =
- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
@@ -318,7 +318,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -331,23 +330,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
- netdev_err(ena_dev->net_device,
- "Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -360,21 +353,16 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
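+ /* Widen to size_t before multiplying to avoid a 32-bit overflow */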
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- netdev_err(ena_dev->net_device,
- "Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -408,7 +396,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -420,16 +407,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
GFP_KERNEL);
}
@@ -512,8 +494,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
- netdev_err(admin_queue->ena_dev->net_device,
- "Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+ comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -536,6 +518,7 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
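+ /* Cap the exponent so the 1U << exp below can't overflow a u32 */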
+ exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
usleep_range(delay_us, 2 * delay_us);
@@ -577,8 +560,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -586,8 +568,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
@@ -631,8 +612,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -655,8 +635,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_header_location;
} else {
netdev_err(ena_dev->net_device,
- "Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ "Invalid header location control, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -678,8 +657,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl,
- supported_feat, llq_info->desc_stride_ctrl);
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -701,8 +680,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size = 256;
} else {
netdev_err(ena_dev->net_device,
- "Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -747,8 +725,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ llq_default_cfg->llq_num_decs_before_header, supported_feat,
+ llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -764,8 +742,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- netdev_err(ena_dev->net_device,
- "Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -777,8 +754,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
+ usecs_to_jiffies(admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -793,26 +769,16 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
} else {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
- */
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = -ETIME;
- goto err;
- }
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
}
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
@@ -864,15 +830,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- netdev_err(ena_dev->net_device,
- "Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -931,8 +895,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -946,8 +909,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_cq->cdesc_addr.virt_addr) {
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr);
io_cq->cdesc_addr.virt_addr = NULL;
@@ -956,8 +918,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
@@ -982,8 +943,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- netdev_err(ena_dev->net_device,
- "Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
@@ -1023,8 +983,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
return -EOPNOTSUPP;
}
@@ -1061,8 +1020,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (unlikely(ret))
netdev_err(ena_dev->net_device,
- "Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
return ret;
}
@@ -1101,13 +1059,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION))
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- rss->hash_key =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1120,8 +1076,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+ rss->hash_key_dma_addr);
rss->hash_key = NULL;
}
@@ -1129,9 +1085,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1144,8 +1099,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr);
rss->hash_ctrl = NULL;
}
@@ -1174,15 +1129,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+ GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;
@@ -1194,8 +1147,7 @@ mem_err2:
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
rss->rss_ind_tbl = NULL;
mem_err1:
rss->tbl_log_size = 0;
@@ -1258,8 +1210,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
}
@@ -1270,8 +1221,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1281,16 +1231,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(uintptr_t)cmd_completion.sq_doorbell_offset);
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
-
io_sq->desc_addr.pbuf_dev_addr =
(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
cmd_completion.llq_descriptors_offset);
}
- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
- io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1417,8 +1363,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1427,18 +1372,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
-
if (cmd_completion.numa_node_register_offset)
io_cq->numa_node_cfg_reg =
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
- io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1448,8 +1387,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Invalid queue number %d but the max is %d\n", qid,
+ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1489,8 +1427,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ena_delay_exponential_backoff_us(exp++,
- ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1516,8 +1453,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1585,8 +1521,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1607,8 +1542,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
- width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
return -EINVAL;
}
@@ -1630,19 +1564,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ctrl_ver = ena_com_reg_bar_read32(ena_dev,
ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- dev_info(ena_dev->dmadev,
- "ENA controller version: %d.%d.%d implementation version %d\n",
+ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1691,20 +1622,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
sq->entries = NULL;
size = ADMIN_CQ_SIZE(admin_queue->q_depth);
if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
cq->entries = NULL;
size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq->entries = NULL;
}
@@ -1719,10 +1647,265 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling)
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate shared memory for the PHC timestamp retrieved from the device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Only readless PHC retrieval is supported */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
{
- ena_dev->admin_queue.auto_polling = polling;
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* If PHC is not supported by the device, exit silently */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
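+/* Readless PHC get-time: write a fresh req_id to the PHC doorbell and
+ * busy-wait until the device echoes it back into the shared response
+ * buffer along with a new timestamp, or until expire_timeout_usec
+ * passes and the request is blocked.
+ */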
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* PHC is in active state, update statistics according
+ * to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during the blocking time,
+ * which indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during the blocking time but got
+ * a PHC error; this occurs if the device:
+ * - exceeded the get-time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize the PHC shared memory with a different req_id value
+ * to be able to identify when the device updates it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for an updated req_id; PHC enters a
+ * blocked state until the blocking time passes,
+ * during which any PHC get-time request will fail
+ * with a device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device which indicates that
+ * PHC timestamp and error_flags are updated too,
+ * checking errors before retrieving timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved an invalid PHC timestamp; PHC enters a
+ * blocked state until the blocking time passes,
+ * during which any PHC get-time request
+ * will fail with a device-busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
@@ -1730,10 +1913,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_alloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -1764,8 +1945,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
}
@@ -1797,8 +1978,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1875,8 +2055,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1902,8 +2081,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
/* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
@@ -1935,8 +2113,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1956,6 +2133,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ customer_metrics = &ena_dev->customer_metrics;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
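+ /* Capability not reported; assume the minimal supported metric set */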
+ customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
+ return;
+ }
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ customer_metrics->supported_metrics =
+ ctx.get_resp.u.customer_metrics.reported_metrics;
+ else
+ netdev_err(ena_dev->net_device,
+ "Failed to query customer metrics support. error: %d\n", ret);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -1971,6 +2198,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+ ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
rc = ena_com_get_feature(ena_dev, &get_resp,
@@ -1979,8 +2207,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version !=
- ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2021,23 +2248,22 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
else
return rc;
+ ena_com_set_supported_customer_metrics(ena_dev);
+
return 0;
}
@@ -2080,8 +2306,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
@@ -2090,8 +2315,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- netdev_dbg(ena_dev->net_device,
- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
@@ -2120,8 +2344,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2133,15 +2356,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
return -EINVAL;
}
@@ -2164,8 +2385,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
return rc;
}
@@ -2173,8 +2393,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
return rc;
}
@@ -2189,60 +2408,88 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
{
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
+ struct ena_com_stats_ctx ctx;
int ret;
- admin_queue = &ena_dev->admin_queue;
-
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
-
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
- if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to get stats. error: %d\n", ret);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats)
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info)
{
struct ena_com_stats_ctx ctx;
int ret;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENA_SRD_INFO);
+ return -EOPNOTSUPP;
+ }
+
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.eni_stats,
- sizeof(ctx.get_resp.u.eni_stats));
+ memcpy(info, &ctx.get_resp.u.ena_srd_info,
+ sizeof(ctx.get_resp.u.ena_srd_info));
return ret;
}
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats)
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
{
+ struct ena_admin_aq_get_stats_cmd *get_cmd;
struct ena_com_stats_ctx ctx;
int ret;
+ if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
+ netdev_err(ena_dev->net_device,
+ "Invalid buffer size %u. The given buffer is too big.\n", len);
+ return -EINVAL;
+ }
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
+ ENA_ADMIN_CUSTOMER_METRICS);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ena_dev->customer_metrics.supported_metrics) {
+ netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
+ return -EOPNOTSUPP;
+ }
+
+ get_cmd = &ctx.get_cmd;
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ ena_dev->customer_metrics.buffer_dma_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device, "Memory address set failed.\n");
+ return ret;
+ }
+
+ get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
+ get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.basic_stats,
- sizeof(ctx.get_resp.u.basic_stats));
+ memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
+ else
+ netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
return ret;
}
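
For orientation, a minimal sketch of the call order implied by the customer-metrics API introduced above: allocate the DMA buffer, size the request by the supported-metric count, fetch, then free. The wrapper function and local array are illustrative assumptions, not part of the patch; since supported_metrics is a u64 bitmask, at most 64 metrics of one u64 each fit exactly in the 512-byte buffer.

/* Illustrative sketch only - not part of the patch */
static int example_read_customer_metrics(struct ena_com_dev *ena_dev)
{
	/* 512 bytes on the stack; a real caller would likely allocate */
	u64 metrics[ENA_CUSTOMER_METRICS_BUFFER_SIZE / sizeof(u64)];
	u32 len;
	int rc;

	rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
	if (rc)
		return rc;

	/* One u64 is returned per supported metric */
	len = ena_com_get_customer_metric_count(ena_dev) * sizeof(u64);

	rc = ena_com_get_customer_metrics(ena_dev, (char *)metrics, len);

	ena_com_delete_customer_metrics_buffer(ena_dev);
	return rc;
}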
@@ -2255,8 +2502,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2275,31 +2521,11 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload)
-{
- int ret;
- struct ena_admin_get_feat_resp resp;
-
- ret = ena_com_get_feature(ena_dev, &resp,
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
- if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to get offload capabilities %d\n", ret);
- return ret;
- }
-
- memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
-
- return 0;
-}
-
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -2309,8 +2535,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
struct ena_admin_get_feat_resp get_resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
@@ -2323,8 +2548,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- netdev_err(ena_dev->net_device,
- "Func hash %d isn't supported by device, abort\n",
+ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2354,8 +2578,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to set hash function %d. error: %d\n",
+ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return -EINVAL;
}
@@ -2387,34 +2610,22 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- netdev_err(ena_dev->net_device,
- "Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
- switch (func) {
- case ENA_ADMIN_TOEPLITZ:
- if (key) {
- if (key_len != sizeof(hash_key->key)) {
- netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
- }
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+ if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ if (key_len != sizeof(hash_key->key)) {
+ netdev_err(ena_dev->net_device,
+ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+ sizeof(hash_key->key));
+ return -EINVAL;
}
- break;
- case ENA_ADMIN_CRC32:
- rss->hash_init_val = init_val;
- break;
- default:
- netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
- func);
- return -EINVAL;
+ memcpy(hash_key->key, key, key_len);
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
+ rss->hash_init_val = init_val;
old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
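
A hedged example of driving the simplified Toeplitz path above; the key bytes are placeholders, and the 0xFFFFFFFF init value mirrors the one ena_ethtool.c passes later in this diff.

/* Illustrative sketch only - not part of the patch */
u8 rss_key[ENA_HASH_KEY_SIZE] = { 0x6d, 0x5a, 0x56, 0xda }; /* placeholder bytes */
int rc;

rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, rss_key,
				ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc))
	netdev_err(ena_dev->net_device,
		   "Failed to fill hash function: %d\n", rc);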
@@ -2495,8 +2706,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
@@ -2527,8 +2737,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2605,8 +2814,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
- proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
return -EINVAL;
}
@@ -2658,8 +2866,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
@@ -2699,8 +2906,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,9 +2985,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2810,6 +3015,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
return 0;
}
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
+ customer_metrics->buffer_virt_addr = NULL;
+
+ customer_metrics->buffer_virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ &customer_metrics->buffer_dma_addr, GFP_KERNEL);
+ if (!customer_metrics->buffer_virt_addr) {
+ customer_metrics->buffer_len = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2827,12 +3050,24 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
if (host_attr->debug_area_virt_addr) {
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
host_attr->debug_area_virt_addr = NULL;
}
}
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ if (customer_metrics->buffer_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ customer_metrics->buffer_virt_addr,
+ customer_metrics->buffer_dma_addr);
+ customer_metrics->buffer_virt_addr = NULL;
+ customer_metrics->buffer_len = 0;
+ }
+}
+
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2877,8 +3112,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2896,8 +3130,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- netdev_err(ena_dev->net_device,
- "Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2935,14 +3168,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- netdev_dbg(ena_dev->net_device,
- "Feature %d isn't supported\n",
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
netdev_err(ena_dev->net_device,
- "Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -2990,8 +3221,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- netdev_err(ena_dev->net_device,
- "The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 73b03ce59412..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -42,12 +42,14 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
#define ENA_HASH_KEY_SIZE 40
@@ -109,16 +111,13 @@ struct ena_com_io_cq {
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
- /* The completion queue head doorbell register */
- u32 __iomem *cq_head_db_reg;
-
/* numa configuration register (for TPH) */
u32 __iomem *numa_node_cfg_reg;
/* The value to write to the above register to unmask
* the interrupt of this queue
*/
- u32 msix_vector;
+ u32 msix_vector ____cacheline_aligned;
enum queue_direction direction;
@@ -134,7 +133,6 @@ struct ena_com_io_cq {
/* Device queue index */
u16 idx;
u16 head;
- u16 last_head_update;
u8 phase;
u8 cdesc_entry_size_in_bytes;
@@ -158,7 +156,6 @@ struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
u32 __iomem *db_addr;
- u8 __iomem *header_addr;
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
@@ -213,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -227,9 +232,6 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
- /* Define if fallback to polling mode should occur */
- bool auto_polling;
-
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -264,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for a valid PHC retrieval; passing this threshold will fail
+ * the get-time request and block new PHC requests for block_timeout_usec
+ * in order to prevent floods on a busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read abort timeout (usec)
+ * PHC request block period; blocking starts once a PHC request expires,
+ * in order to prevent floods on a busy device.
+ * Any PHC request during the block period will be skipped.
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -283,6 +326,16 @@ struct ena_rss {
};
+struct ena_customer_metrics {
+ /* Bitmask corresponding to ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ * and ena_admin_customer_metrics_id
+ */
+ u64 supported_metrics;
+ dma_addr_t buffer_dma_addr;
+ void *buffer_virt_addr;
+ u32 buffer_len;
+};
+
struct ena_host_attribute {
/* Debug area */
u8 *debug_area_virt_addr;
@@ -310,10 +363,14 @@ struct ena_com_dev {
u16 stats_func; /* Selected function for extended statistic dump */
u16 stats_queue; /* Selected queue for extended statistic dump */
+ u32 ena_min_poll_delay_us;
+
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
+ u32 capabilities;
u32 dma_addr_bits;
struct ena_host_attribute host_attr;
@@ -330,7 +387,7 @@ struct ena_com_dev {
struct ena_com_llq_info llq_info;
- u32 ena_min_poll_delay_us;
+ struct ena_customer_metrics customer_metrics;
};
struct ena_com_dev_get_features_ctx {
@@ -375,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return whether the PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
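
A sketch of the lifecycle implied by these prototypes: check support first (init and config assume the device supports PHC per their @note lines), then init, config, read, and destroy. Error handling is condensed and illustrative.

/* Illustrative sketch only - not part of the patch */
u64 phc_timestamp;

if (ena_com_phc_supported(ena_dev)) {
	if (!ena_com_phc_init(ena_dev)) {
		if (!ena_com_phc_config(ena_dev) &&
		    !ena_com_phc_get_timestamp(ena_dev, &phc_timestamp))
			netdev_dbg(ena_dev->net_device,
				   "PHC timestamp: %llu\n", phc_timestamp);
		ena_com_phc_destroy(ena_dev);
	}
}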
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
@@ -483,17 +574,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
- * @ena_dev: ENA communication layer struct
- * @polling: Enable/Disable polling mode
- *
- * Set the autopolling mode.
- * If autopolling is on:
- * In case of missing interrupt when data is available switch to polling.
- */
-void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
- bool polling);
-
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
@@ -581,40 +661,40 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx);
-/* ena_com_get_dev_basic_stats - Get device basic statistics
+/* ena_com_get_eni_stats - Get extended network interface statistics
* @ena_dev: ENA communication layer struct
* @stats: stats return value
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_basic_stats *stats);
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats);
-/* ena_com_get_eni_stats - Get extended network interface statistics
+/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
* @ena_dev: ENA communication layer struct
- * @stats: stats return value
+ * @info: ena srd stats and flags
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats);
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info);
-/* ena_com_set_dev_mtu - Configure the device mtu.
+/* ena_com_get_customer_metrics - Get customer metrics for network interface
* @ena_dev: ENA communication layer struct
- * @mtu: mtu value
+ * @buffer: buffer for returned customer metrics
+ * @len: size of the buffer
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
-/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
+/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
- * @offlad: offload return value
+ * @mtu: mtu value
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
@@ -809,6 +889,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
+/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
@@ -823,6 +910,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated customer metrics area.
+ */
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
@@ -967,6 +1061,40 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false;
}
+/* ena_com_get_cap - query whether device supports a capability.
+ * @ena_dev: ENA communication layer struct
+ * @cap_id: enum value representing the capability
+ *
+ * @return - true if capability is supported or false otherwise
+ */
+static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_caps_id cap_id)
+{
+ return !!(ena_dev->capabilities & BIT(cap_id));
+}
+
+/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
+ * @ena_dev: ENA communication layer struct
+ * @metric_id: enum value representing the customer metric
+ *
+ * @return - true if customer metric is supported or false otherwise
+ */
+static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
+ enum ena_admin_customer_metrics_id metric_id)
+{
+ return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
+}
+
+/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - the number of supported customer metrics
+ */
+static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
+{
+ return hweight64(ena_dev->customer_metrics.supported_metrics);
+}
+
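
A brief sketch showing how these helpers compose; the 64-iteration bound is an assumption derived from supported_metrics being a u64 bitmask, not a named constant in this patch.

/* Illustrative sketch only - not part of the patch */
int metric_id, n = 0;

for (metric_id = 0; metric_id < 64; metric_id++)
	if (ena_com_get_customer_metric_support(ena_dev, metric_id))
		n++;

/* hweight64() over the same mask, so these always agree */
WARN_ON(n != ena_com_get_customer_metric_count(ena_dev));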
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
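
Assuming debugfs is mounted at its usual /sys/kernel/debug, the counters above become readable at /sys/kernel/debug/<pci-device-name>/phc_stats, where the directory name comes from dev_name(&adapter->pdev->dev); a read prints the five phc_* counters one per line, and returns empty output while PHC is inactive.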
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include "linux/pci.h"
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef DEVLINK_H
+#define DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* DEVLINK_H */
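
A hedged sketch of how a probe path might wire these entry points together; the surrounding function, variables, and teardown placement are assumptions based on the alloc/register/params_get split above, not code from this patch.

/* Illustrative sketch only - not part of the patch */
struct devlink *devlink;

devlink = ena_devlink_alloc(adapter);
if (!devlink)
	return -ENOMEM;

ena_devlink_register(devlink, &adapter->pdev->dev);
ena_devlink_params_get(devlink);	/* apply driverinit ENABLE_PHC value */

/* ... on teardown ... */
ena_devlink_unregister(devlink);
ena_devlink_free(devlink);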
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 3d6f0a466a9e..4c6e07aa4bbb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
io_sq->entries_in_tx_burst_left--;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+ io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+ (llq_info->desc_list_entry_size) / 8);
io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;
- if (unlikely((header_offset + header_len) >
- llq_info->desc_list_entry_size)) {
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return NULL;
}
@@ -233,31 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
- u16 *first_cdesc_idx)
+static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *num_descs)
{
+ u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
- u16 count = 0, head_masked;
u32 last = 0;
do {
+ u32 status;
+
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
+ status = READ_ONCE(cdesc->status);
ena_com_cq_inc_head(io_cq);
+ if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
+ struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+
+ netdev_err(dev->net_device,
+ "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
+ count, io_cq->qid, cdesc->req_id);
+ return -EFAULT;
+ }
count++;
- last = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
- count += io_cq->cur_rx_pkt_cdesc_count;
head_masked = io_cq->head & (io_cq->q_depth - 1);
+ *num_descs = count;
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
@@ -265,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
- io_cq->cur_rx_pkt_cdesc_count += count;
- count = 0;
+ io_cq->cur_rx_pkt_cdesc_count = count;
+ *num_descs = 0;
}
- return count;
+ return 0;
}
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
@@ -328,9 +336,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
* compare it to the stored version, just create the meta
*/
if (io_sq->disable_meta_caching) {
- if (unlikely(!ena_tx_ctx->meta_valid))
- return -EINVAL;
-
*have_meta = true;
return ena_com_create_meta(io_sq, ena_meta);
}
@@ -372,9 +377,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -406,13 +410,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(header_len > io_sq->tx_max_header_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ "Header size is too large %d max header: %d\n", header_len,
+ io_sq->tx_max_header_size);
return -EINVAL;
}
- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push)) {
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Push header wasn't provided in LLQ mode\n");
return -EINVAL;
@@ -549,23 +552,25 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
+ int rc;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (unlikely(rc != 0))
+ return -EFAULT;
+
if (nb_hw_desc == 0) {
ena_rx_ctx->descs = nb_hw_desc;
return 0;
}
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -589,8 +594,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
io_sq->next_to_comp += nb_hw_desc;
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
- io_sq->qid, io_sq->next_to_comp);
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -627,8 +632,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
- __func__, io_sq->qid, req_id);
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+ req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 689313ee25a8..449bc4960ccc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -8,8 +8,10 @@
#include "ena_com.h"
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
+/* we allow 2 DMA descriptors per LLQ entry */
+#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
+#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
+#define ENA_LLQ_LARGE_HEADER (256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
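
As a worked example, assuming sizeof(struct ena_eth_io_tx_desc) is 16 bytes (not shown in this patch): the two-descriptor chunk is 32 bytes, so ENA_LLQ_HEADER evaluates to 96 and ENA_LLQ_LARGE_HEADER to 224 - the two tx_push_buf_len values that ena_set_ringparam() accepts later in this diff.

/* Illustrative sketch only - sizes assume a 16-byte TX descriptor */
static_assert(ENA_LLQ_ENTRY_DESC_CHUNK_SIZE == 32);
static_assert(ENA_LLQ_HEADER == 96 && ENA_LLQ_LARGE_HEADER == 224);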
struct ena_com_tx_ctx {
struct ena_com_tx_meta ena_meta;
@@ -45,7 +47,7 @@ struct ena_com_rx_ctx {
bool frag;
u32 hash;
u16 descs;
- int max_bufs;
+ u16 max_bufs;
u8 pkt_offset;
};
@@ -141,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
}
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Queue: %d num_descs: %d num_entries_needed: %d\n",
- io_sq->qid, num_descs, num_entries_needed);
+ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+ num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -153,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 tail = io_sq->tail;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+ max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
return 0;
}
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
- u16 unreported_comp, head;
- bool need_update;
-
- if (unlikely(io_cq->cq_head_db_reg)) {
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (unlikely(need_update)) {
- netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
- }
- }
-
- return 0;
-}
-
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
u8 numa_node)
{
@@ -244,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+ cdesc->req_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 13e745cf3781..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,14 +5,21 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
+#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
int stat_offset;
};
+struct ena_hw_metrics {
+ char name[ETH_GSTRING_LEN];
+};
+
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
@@ -40,6 +47,18 @@ struct ena_stats {
#define ENA_STAT_ENI_ENTRY(stat) \
ENA_STAT_HW_ENTRY(stat, eni_stats)
+#define ENA_STAT_ENA_SRD_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, ena_srd_stats)
+
+#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \
+}
+
+#define ENA_METRIC_ENI_ENTRY(stat) { \
+ .name = #stat \
+}
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -48,8 +67,12 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(interface_up),
ENA_STAT_GLOBAL_ENTRY(interface_down),
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+ ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
+/* A partial list of hw stats. Used when admin command
+ * with type ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported
+ */
static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
@@ -58,6 +81,23 @@ static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};
+static const struct ena_hw_metrics ena_hw_stats_strings[] = {
+ ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_available),
+};
+
+static const struct ena_stats ena_srd_info_strings[] = {
+ ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization)
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -82,7 +122,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
- ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(csum_bad),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
@@ -110,8 +150,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) \
- (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
+#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings)
+#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -119,9 +160,60 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
*(dst) = *src;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
+}
+
+static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ u32 supported_metrics_count;
+ int len;
+
+ supported_metrics_count = ena_com_get_customer_metric_count(dev);
+ len = supported_metrics_count * sizeof(u64);
+
+ /* Fill the data buffer, and advance its pointer */
+ ena_com_get_customer_metrics(dev, (char *)(*data), len);
+ (*data) += supported_metrics_count;
+
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ ena_com_get_eni_stats(dev, &adapter->eni_stats);
+ /* Update regardless of rc - once we have told ethtool how many stats
+ * we have, it will print that many stats. We can't leave holes in the stats.
+ */
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info);
+ /* Get ENA SRD mode */
+ ptr = (u64 *)&adapter->ena_srd_info;
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ /* Wrapped within an outer struct - account for the additional
+ * offset of the ENA SRD mode field that was already processed
+ */
+ ptr = (u64 *)&adapter->ena_srd_info +
+ ena_stats->stat_offset + 1;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
}
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
@@ -178,7 +270,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
static void ena_get_stats(struct ena_adapter *adapter,
u64 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
u64 *ptr;
@@ -192,17 +284,8 @@ static void ena_get_stats(struct ena_adapter *adapter,
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
- if (eni_stats_needed) {
- ena_update_hw_stats(adapter);
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
-
- ptr = (u64 *)&adapter->eni_stats +
- ena_stats->stat_offset;
-
- ena_safe_update_stat(ptr, data++, &adapter->syncp);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats(adapter, &data);
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
@@ -214,7 +297,19 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- ena_get_stats(adapter, data, adapter->eni_stats_supported);
+ ena_get_stats(adapter, data, true);
+}
+
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -226,7 +321,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- return ENA_STATS_ARRAY_ENI(adapter);
+ struct ena_com_dev *dev = adapter->ena_dev;
+ int count;
+
+ count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO);
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
+ count += ena_com_get_customer_metric_count(dev);
+ else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
+ count += ENA_STATS_ARRAY_ENI;
+
+ return count;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -242,6 +347,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
return -EOPNOTSUPP;
}
+static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_hw_metrics *ena_metrics;
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) {
+ if (ena_com_get_customer_metric_support(dev, i)) {
+ ena_metrics = &ena_hw_stats_strings[i];
+ ethtool_puts(data, ena_metrics->name);
+ }
+ }
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+}
+
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
@@ -260,17 +394,14 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
ena_stats->name);
}
- if (!is_xdp) {
- /* RX stats, in XDP there isn't a RX queue
- * counterpart
- */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ /* In XDP there isn't an RX queue counterpart */
+ if (is_xdp)
+ continue;
- ethtool_sprintf(data,
- "queue_%u_rx_%s", i,
- ena_stats->name);
- }
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name);
}
}
}
@@ -290,22 +421,18 @@ static void ena_com_dev_strings(u8 **data)
static void ena_get_strings(struct ena_adapter *adapter,
u8 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
int i;
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
+ ethtool_puts(&data, ena_stats->name);
}
- if (eni_stats_needed) {
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats_strings(adapter, &data);
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
@@ -319,7 +446,7 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ ena_get_strings(adapter, data, true);
break;
}
}
@@ -458,28 +585,56 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ena_adapter *adapter = netdev_priv(dev);
-
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
- sizeof(info->bus_info));
+ ssize_t ret = 0;
+
+ ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "module name will be truncated, status = %zd\n", ret);
+
+ ret = strscpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "bus info will be truncated, status = %zd\n", ret);
}
static void ena_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
ring->tx_max_pending = adapter->max_tx_ring_size;
ring->rx_max_pending = adapter->max_rx_ring_size;
+ if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ bool large_llq_supported = adapter->large_llq_header_supported;
+
+ kernel_ring->tx_push = true;
+ kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+ if (large_llq_supported)
+ kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER;
+ else
+ kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER;
+ } else {
+ kernel_ring->tx_push = false;
+ kernel_ring->tx_push_buf_max_len = 0;
+ kernel_ring->tx_push_buf_len = 0;
+ }
+
ring->tx_pending = adapter->tx_ring[0].ring_size;
ring->rx_pending = adapter->rx_ring[0].ring_size;
}
static int ena_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- u32 new_tx_size, new_rx_size;
+ u32 new_tx_size, new_rx_size, new_tx_push_buf_len;
+ bool changed = false;
new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
ENA_MIN_RING_SIZE : ring->tx_pending;
@@ -489,11 +644,51 @@ static int ena_set_ringparam(struct net_device *netdev,
ENA_MIN_RING_SIZE : ring->rx_pending;
new_rx_size = rounddown_pow_of_two(new_rx_size);
- if (new_tx_size == adapter->requested_tx_ring_size &&
- new_rx_size == adapter->requested_rx_ring_size)
+ changed |= new_tx_size != adapter->requested_tx_ring_size ||
+ new_rx_size != adapter->requested_rx_ring_size;
+
+ /* This value is ignored if LLQ is not supported */
+ new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+
+ if ((adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) !=
+ kernel_ring->tx_push) {
+ NL_SET_ERR_MSG_MOD(extack, "Push mode state cannot be modified");
+ return -EINVAL;
+ }
+
+ /* Validate that the push buffer is supported on the underlying device */
+ if (kernel_ring->tx_push_buf_len) {
+ enum ena_admin_placement_policy_type placement;
+
+ new_tx_push_buf_len = kernel_ring->tx_push_buf_len;
+
+ placement = adapter->ena_dev->tx_mem_queue_type;
+ if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return -EOPNOTSUPP;
+
+ if (new_tx_push_buf_len != ENA_LLQ_HEADER &&
+ new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) {
+ bool large_llq_sup = adapter->large_llq_header_supported;
+ char large_llq_size_str[40];
+
+ snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER);
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Supported tx push buff values: [%lu%s]",
+ ENA_LLQ_HEADER,
+ large_llq_sup ? large_llq_size_str : "");
+
+ return -EINVAL;
+ }
+
+ changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size;
+ }
+
+ if (!changed)
return 0;
- return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
+ return ena_update_queue_params(adapter, new_tx_size, new_rx_size,
+ new_tx_push_buf_len);
}
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
@@ -540,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -591,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -635,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -666,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -688,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
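+	/* Report a zero key size when the device exposes no RSS hash key */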
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
@@ -739,15 +919,15 @@ static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
return rc;
}
-static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ena_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ena_adapter *adapter = netdev_priv(netdev);
enum ena_admin_hash_functions ena_func;
u8 func;
int rc;
- rc = ena_indirection_table_get(adapter, indir);
+ rc = ena_indirection_table_get(adapter, rxfh->indir);
if (rc)
return rc;
@@ -762,7 +942,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
- rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key);
if (rc)
return rc;
@@ -779,27 +959,27 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return -EOPNOTSUPP;
}
- if (hfunc)
- *hfunc = func;
+ rxfh->hfunc = func;
return 0;
}
-static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ena_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_hash_functions func = 0;
int rc;
- if (indir) {
- rc = ena_indirection_table_set(adapter, indir);
+ if (rxfh->indir) {
+ rc = ena_indirection_table_set(adapter, rxfh->indir);
if (rc)
return rc;
}
- switch (hfunc) {
+ switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
func = ena_com_get_current_hash_function(ena_dev);
break;
@@ -811,12 +991,12 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
break;
default:
netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
- hfunc);
+ rxfh->hfunc);
return -EOPNOTSUPP;
}
- if (key || func) {
- rc = ena_com_fill_hash_function(ena_dev, func, key,
+ if (rxfh->key || func) {
+ rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
if (unlikely(rc)) {
@@ -843,11 +1023,20 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES ||
- (ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, count)))
+ if (count < ENA_MIN_NUM_IO_QUEUES)
return -EINVAL;
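+	/* Advertise XDP features only when the channel count leaves room for dedicated XDP TX queues */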
+ if (!ena_xdp_legal_queue_count(adapter, count)) {
+ if (ena_xdp_present(adapter))
+ return -EINVAL;
+
+ xdp_clear_features_flag(netdev);
+ } else {
+ xdp_set_features_flag(netdev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT);
+ }
+
return ena_update_queue_count(adapter, count);
}
@@ -880,11 +1069,7 @@ static int ena_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
len = *(u32 *)data;
- if (len > adapter->netdev->mtu) {
- ret = -EINVAL;
- break;
- }
- adapter->rx_copybreak = len;
+ ret = ena_set_rx_copybreak(adapter, len);
break;
default:
ret = -EINVAL;
@@ -897,6 +1082,8 @@ static int ena_set_tunable(struct net_device *netdev,
static const struct ethtool_ops ena_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH_BUF_LEN |
+ ETHTOOL_RING_USE_TX_PUSH,
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
.get_msglevel = ena_get_msglevel,
@@ -910,16 +1097,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
@@ -941,22 +1129,18 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
return;
}
- strings_buf = devm_kcalloc(&adapter->pdev->dev,
- ETH_GSTRING_LEN, strings_num,
- GFP_ATOMIC);
+ strings_buf = kcalloc(strings_num, ETH_GSTRING_LEN, GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate strings_buf\n");
return;
}
- data_buf = devm_kcalloc(&adapter->pdev->dev,
- strings_num, sizeof(u64),
- GFP_ATOMIC);
+ data_buf = kcalloc(strings_num, sizeof(u64), GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
"Failed to allocate data buf\n");
- devm_kfree(&adapter->pdev->dev, strings_buf);
+ kfree(strings_buf);
return;
}
@@ -978,8 +1162,8 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
strings_buf + i * ETH_GSTRING_LEN,
data_buf[i]);
- devm_kfree(&adapter->pdev->dev, strings_buf);
- devm_kfree(&adapter->pdev->dev, data_buf);
+ kfree(strings_buf);
+ kfree(data_buf);
}
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7d5d885d85d5..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,8 +16,14 @@
#include <net/ip.h>
#include "ena_netdev.h"
-#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
+#include "ena_xdp.h"
+
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
@@ -31,10 +34,8 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
-#define ENA_NAPI_BUDGET 64
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
- NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static struct ena_aenq_handlers aenq_handlers;
@@ -44,69 +45,47 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
-
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count);
-static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
-static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
-static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
-static void ena_napi_disable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_napi_enable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static int ena_up(struct ena_adapter *adapter);
-static void ena_down(struct ena_adapter *adapter);
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info);
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-
-/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
-static void ena_increase_stat(u64 *statp, u64 cnt,
- struct u64_stats_sync *syncp)
-{
- u64_stats_update_begin(syncp);
- (*statp) += cnt;
- u64_stats_update_end(syncp);
-}
-
-static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
-{
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
-}
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
struct ena_adapter *adapter = netdev_priv(dev);
+ unsigned int time_since_last_napi, threshold;
+ struct ena_ring *tx_ring;
+ int napi_scheduled;
+
+ if (txqueue >= adapter->num_io_queues) {
+ netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
+ goto schedule_reset;
+ }
+
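+	/* Compare the time since this queue's last NAPI poll against the watchdog timeout */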
+ threshold = jiffies_to_usecs(dev->watchdog_timeo);
+ tx_ring = &adapter->tx_ring[txqueue];
+
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
+ netdev_err(dev,
+ "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
+ txqueue,
+ threshold,
+ time_since_last_napi,
+ napi_scheduled);
+
+ if (threshold < time_since_last_napi && napi_scheduled) {
+ netdev_err(dev,
+ "napi handler hasn't been called for a long time but is scheduled\n");
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ }
+schedule_reset:
/* Change the state of the device to trigger reset
* Check that we are not in the middle of a trigger already
*/
-
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
+ ena_reset_device(adapter, reset_reason);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
-
- netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -126,7 +105,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
if (!ret) {
netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
} else {
netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
new_mtu);
@@ -135,19 +114,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static int ena_xmit_common(struct net_device *dev,
- struct ena_ring *ring,
- struct ena_tx_buffer *tx_info,
- struct ena_com_tx_ctx *ena_tx_ctx,
- u16 next_to_use,
- u32 bytes)
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
{
- struct ena_adapter *adapter = netdev_priv(dev);
int rc, nb_hw_desc;
if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
ena_ring_tx_doorbell(ring);
@@ -162,15 +140,11 @@ static int ena_xmit_common(struct net_device *dev,
* ena_com_prepare_tx() are fatal and therefore require a device reset.
*/
if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
+ netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
- &ring->syncp);
- if (rc != -ENOMEM) {
- adapter->reset_reason =
- ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- }
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
+ if (rc != -ENOMEM)
+ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -180,6 +154,7 @@ static int ena_xmit_common(struct net_device *dev,
u64_stats_update_end(&ring->syncp);
tx_info->tx_descs = nb_hw_desc;
+ tx_info->total_tx_size = bytes;
tx_info->last_jiffies = jiffies;
tx_info->print_once = 0;
@@ -188,482 +163,6 @@ static int ena_xmit_common(struct net_device *dev,
return 0;
}
-/* This is the XDP napi callback. XDP queues use a separate napi callback
- * than Rx/Tx queues.
- */
-static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
-{
- struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
- u32 xdp_work_done, xdp_budget;
- struct ena_ring *xdp_ring;
- int napi_comp_call = 0;
- int ret;
-
- xdp_ring = ena_napi->xdp_ring;
-
- xdp_budget = budget;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
- napi_complete_done(napi, 0);
- return 0;
- }
-
- xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
-
- /* If the device is about to reset or down, avoid unmask
- * the interrupt and return 0 so NAPI won't reschedule
- */
- if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
- napi_complete_done(napi, 0);
- ret = 0;
- } else if (xdp_budget > xdp_work_done) {
- napi_comp_call = 1;
- if (napi_complete_done(napi, xdp_work_done))
- ena_unmask_interrupt(xdp_ring, NULL);
- ena_update_ring_numa_node(xdp_ring, NULL);
- ret = xdp_work_done;
- } else {
- ret = xdp_budget;
- }
-
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.napi_comp += napi_comp_call;
- xdp_ring->tx_stats.tx_poll++;
- u64_stats_update_end(&xdp_ring->syncp);
- xdp_ring->tx_stats.last_napi_jiffies = jiffies;
-
- return ret;
-}
-
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_frame *xdpf,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- struct ena_adapter *adapter = xdp_ring->adapter;
- struct ena_com_buf *ena_buf;
- int push_len = 0;
- dma_addr_t dma;
- void *data;
- u32 size;
-
- tx_info->xdpf = xdpf;
- data = tx_info->xdpf->data;
- size = tx_info->xdpf->len;
-
- if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* Designate part of the packet for LLQ */
- push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-
- ena_tx_ctx->push_header = data;
-
- size -= push_len;
- data += push_len;
- }
-
- ena_tx_ctx->header_len = push_len;
-
- if (size > 0) {
- dma = dma_map_single(xdp_ring->dev,
- data,
- size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
- goto error_report_dma_error;
-
- tx_info->map_linear_data = 0;
-
- ena_buf = tx_info->bufs;
- ena_buf->paddr = dma;
- ena_buf->len = size;
-
- ena_tx_ctx->ena_bufs = ena_buf;
- ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
- }
-
- return 0;
-
-error_report_dma_error:
- ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
- &xdp_ring->syncp);
- netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
-
- return -EINVAL;
-}
-
-static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- int flags)
-{
- struct ena_com_tx_ctx ena_tx_ctx = {};
- struct ena_tx_buffer *tx_info;
- u16 next_to_use, req_id;
- int rc;
-
- next_to_use = xdp_ring->next_to_use;
- req_id = xdp_ring->free_ids[next_to_use];
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
- if (unlikely(rc))
- return rc;
-
- ena_tx_ctx.req_id = req_id;
-
- rc = ena_xmit_common(dev,
- xdp_ring,
- tx_info,
- &ena_tx_ctx,
- next_to_use,
- xdpf->len);
- if (rc)
- goto error_unmap_dma;
-
- /* trigger the dma engine. ena_ring_tx_doorbell()
- * calls a memory barrier inside it.
- */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- return rc;
-
-error_unmap_dma:
- ena_unmap_tx_buff(xdp_ring, tx_info);
- tx_info->xdpf = NULL;
- return rc;
-}
-
-static int ena_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
-{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_ring *xdp_ring;
- int qid, i, nxmit = 0;
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
- return -ENETDOWN;
-
- /* We assume that all rings have the same XDP program */
- if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
- return -ENXIO;
-
- qid = smp_processor_id() % adapter->xdp_num_queues;
- qid += adapter->xdp_first_ring;
- xdp_ring = &adapter->tx_ring[qid];
-
- /* Other CPU ids might try to send through this queue */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- for (i = 0; i < n; i++) {
- if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
- break;
- nxmit++;
- }
-
- /* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
-
- /* Return number of packets sent */
- return nxmit;
-}
-
-static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
-{
- struct bpf_prog *xdp_prog;
- struct ena_ring *xdp_ring;
- u32 verdict = XDP_PASS;
- struct xdp_frame *xdpf;
- u64 *xdp_stat;
-
- xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
-
- if (!xdp_prog)
- goto out;
-
- verdict = bpf_prog_run_xdp(xdp_prog, xdp);
-
- switch (verdict) {
- case XDP_TX:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
- break;
- }
-
- /* Find xmit queue */
- xdp_ring = rx_ring->xdp_ring;
-
- /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
- XDP_XMIT_FLUSH))
- xdp_return_frame(xdpf);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
- xdp_stat = &rx_ring->rx_stats.xdp_tx;
- break;
- case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
- xdp_stat = &rx_ring->rx_stats.xdp_redirect;
- break;
- }
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
- break;
- case XDP_ABORTED:
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- break;
- case XDP_DROP:
- xdp_stat = &rx_ring->rx_stats.xdp_drop;
- break;
- case XDP_PASS:
- xdp_stat = &rx_ring->rx_stats.xdp_pass;
- break;
- default:
- bpf_warn_invalid_xdp_action(verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_invalid;
- }
-
- ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
-out:
- return verdict;
-}
-
-static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
-{
- adapter->xdp_first_ring = adapter->num_io_queues;
- adapter->xdp_num_queues = adapter->num_io_queues;
-
- ena_init_io_rings(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
-}
-
-static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
-{
- int rc = 0;
-
- rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
- adapter->xdp_num_queues);
- if (rc)
- goto setup_err;
-
- rc = ena_create_io_tx_queues_in_range(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
- if (rc)
- goto create_err;
-
- return 0;
-
-create_err:
- ena_free_all_io_tx_resources(adapter);
-setup_err:
- return rc;
-}
-
-/* Provides a way for both kernel and bpf-prog to know
- * more about the RX-queue a given XDP frame arrived on.
- */
-static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
-{
- int rc;
-
- rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- goto err;
- }
-
- rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- }
-
-err:
- return rc;
-}
-
-static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
-{
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-}
-
-static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first, int count)
-{
- struct ena_ring *rx_ring;
- int i = 0;
-
- for (i = first; i < count; i++) {
- rx_ring = &adapter->rx_ring[i];
- xchg(&rx_ring->xdp_bpf_prog, prog);
- if (prog) {
- ena_xdp_register_rxq_info(rx_ring);
- rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else {
- ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = NET_SKB_PAD;
- }
- }
-}
-
-static void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
-{
- struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
-
- ena_xdp_exchange_program_rx_in_range(adapter,
- prog,
- 0,
- adapter->num_io_queues);
-
- if (old_bpf_prog)
- bpf_prog_put(old_bpf_prog);
-}
-
-static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
-{
- bool was_up;
- int rc;
-
- was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
-
- if (was_up)
- ena_down(adapter);
-
- adapter->xdp_first_ring = 0;
- adapter->xdp_num_queues = 0;
- ena_xdp_exchange_program(adapter, NULL);
- if (was_up) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- return 0;
-}
-
-static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- struct bpf_prog *prog = bpf->prog;
- struct bpf_prog *old_bpf_prog;
- int rc, prev_mtu;
- bool is_up;
-
- is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- rc = ena_xdp_allowed(adapter);
- if (rc == ENA_XDP_ALLOWED) {
- old_bpf_prog = adapter->xdp_bpf_prog;
- if (prog) {
- if (!is_up) {
- ena_init_all_xdp_queues(adapter);
- } else if (!old_bpf_prog) {
- ena_down(adapter);
- ena_init_all_xdp_queues(adapter);
- }
- ena_xdp_exchange_program(adapter, prog);
-
- if (is_up && !old_bpf_prog) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- } else if (old_bpf_prog) {
- rc = ena_destroy_and_free_all_xdp_queues(adapter);
- if (rc)
- return rc;
- }
-
- prev_mtu = netdev->max_mtu;
- netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
-
- if (!old_bpf_prog)
- netif_info(adapter, drv, adapter->netdev,
- "XDP program is set, changing the max_mtu from %d to %d",
- prev_mtu, netdev->max_mtu);
-
- } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
- netdev->mtu, ENA_XDP_MAX_MTU);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
- return -EINVAL;
- } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
- adapter->num_io_queues, adapter->max_num_io_queues);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
- * program as well as to query the current xdp program id.
- */
-static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return ena_xdp_set(netdev, bpf);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -676,12 +175,13 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
ring->cpu = 0;
+ ring->numa_node = 0;
ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -779,6 +279,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
tx_ring->cpu = ena_irq->cpu;
+ tx_ring->numa_node = node;
return 0;
err_push_buf_intermediate_buf:
@@ -811,9 +312,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count)
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i, rc = 0;
@@ -836,8 +336,8 @@ err_setup_tx:
return rc;
}
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i;
@@ -850,7 +350,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
*
* Free all transmit software resources
*/
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
ena_free_all_io_tx_resources_in_range(adapter,
0,
@@ -911,6 +411,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->cpu = ena_irq->cpu;
+ rx_ring->numa_node = node;
return 0;
}
@@ -984,8 +485,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
*/
page = dev_alloc_page();
if (!page) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
@@ -1014,7 +514,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
int tailroom;
/* restore page offset value in case it has been changed by the device */
- rx_info->page_offset = headroom;
+ rx_info->buf_offset = headroom;
/* if previous allocated page is not used */
if (unlikely(rx_info->page))
@@ -1022,7 +522,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
@@ -1031,6 +531,8 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
rx_info->page = page;
+ rx_info->dma_addr = dma;
+ rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma + headroom;
ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
@@ -1038,14 +540,12 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
return 0;
}
-static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
- struct ena_rx_buffer *rx_info)
+static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info,
+ unsigned long attrs)
{
- struct ena_com_buf *ena_buf = &rx_info->ena_buf;
-
- dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
- ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
@@ -1059,7 +559,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- ena_unmap_rx_buff(rx_ring, rx_info);
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
__free_page(page);
rx_info->page = NULL;
@@ -1159,8 +659,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -1195,8 +695,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
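+	/* XDP rings hold xdp_frames rather than skbs; free each buffer accordingly */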
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -1216,10 +719,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -1252,6 +760,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
}
@@ -1262,56 +771,39 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
-static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
- struct ena_tx_buffer *tx_info, bool is_xdp)
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
{
if (tx_info)
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "tx_info doesn't have valid %s",
- is_xdp ? "xdp frame" : "skb");
+ "tx_info doesn't have valid %s. qid %u req_id %u",
+ is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
else
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "Invalid req_id: %hu\n",
- req_id);
+ "Invalid req_id %u in qid %u\n",
+ req_id, ring->qid);
ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
+ ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
- /* Trigger device reset */
- ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
return -EFAULT;
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < tx_ring->ring_size)) {
- tx_info = &tx_ring->tx_buffer_info[req_id];
- if (likely(tx_info->skb))
- return 0;
- }
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
-{
- struct ena_tx_buffer *tx_info = NULL;
-
- if (likely(req_id < xdp_ring->ring_size)) {
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
- }
-
- return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
-}
-
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
struct netdev_queue *txq;
@@ -1332,9 +824,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL, false);
break;
+ }
+ /* validate that the request id points to a valid skb */
rc = validate_tx_req_id(tx_ring, req_id);
if (rc)
break;
@@ -1354,7 +850,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
skb);
- tx_bytes += skb->len;
+ tx_bytes += tx_info->total_tx_size;
dev_kfree_skb(skb);
tx_pkts++;
total_done += tx_info->tx_descs;
@@ -1366,7 +862,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -1398,15 +893,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts;
}
-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
+static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
{
struct sk_buff *skb;
if (!first_frag)
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
+ skb = napi_alloc_skb(rx_ring->napi, len);
else
- skb = build_skb(first_frag, ENA_PAGE_SIZE);
+ skb = napi_build_skb(first_frag, len);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
@@ -1415,23 +909,47 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate skb. first_frag %s\n",
first_frag ? "provided" : "not provided");
- return NULL;
}
return skb;
}
+static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
+ u16 len, int pkt_offset)
+{
+ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+ /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
+ * for data + headroom + tailroom.
+ */
+ if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
+ page_ref_inc(rx_info->page);
+ rx_info->page_offset += buf_len;
+ ena_buf->paddr += buf_len;
+ ena_buf->len -= buf_len;
+ return true;
+ }
+
+ return false;
+}
+
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_com_rx_buf_info *ena_bufs,
u32 descs,
u16 *next_to_clean)
{
+ int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
+ int page_offset, pkt_offset;
+ dma_addr_t pre_reuse_paddr;
u16 len, req_id, buf = 0;
+ bool reuse_rx_buf_page;
struct sk_buff *skb;
- void *page_addr;
- u32 page_offset;
- void *data_addr;
+ void *buf_addr;
+ int buf_offset;
+ u16 buf_len;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
@@ -1439,8 +957,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Page is NULL\n");
+ adapter = rx_ring->adapter;
+ netif_err(adapter, rx_err, rx_ring->netdev,
+ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return NULL;
}
@@ -1448,34 +969,25 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
"rx_info %p page %p\n",
rx_info, rx_info->page);
- /* save virt address of first buffer */
- page_addr = page_address(rx_info->page);
+ buf_offset = rx_info->buf_offset;
+ pkt_offset = buf_offset - rx_ring->rx_headroom;
page_offset = rx_info->page_offset;
- data_addr = page_addr + page_offset;
-
- prefetch(data_addr);
+ buf_addr = page_address(rx_info->page) + page_offset;
if (len <= rx_ring->rx_copybreak) {
- skb = ena_alloc_skb(rx_ring, NULL);
+ skb = ena_alloc_skb(rx_ring, NULL, len);
if (unlikely(!skb))
return NULL;
- netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "RX allocated small packet. len %d. data_len %d\n",
- skb->len, skb->data_len);
-
- /* sync this buffer for CPU use */
- dma_sync_single_for_cpu(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr),
- len,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, data_addr, len);
+ skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
dma_sync_single_for_device(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr),
+ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
len,
DMA_FROM_DEVICE);
skb_put(skb, len);
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "RX allocated small packet. len %d.\n", skb->len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
@@ -1483,14 +995,21 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return skb;
}
- ena_unmap_rx_buff(rx_ring, rx_info);
+ buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
- skb = ena_alloc_skb(rx_ring, page_addr);
+ /* If XDP isn't loaded try to reuse part of the RX buffer */
+ reuse_rx_buf_page = !is_xdp_loaded &&
+ ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
+
+ if (!reuse_rx_buf_page)
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+
+ skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
if (unlikely(!skb))
return NULL;
/* Populate skb's linear part */
- skb_reserve(skb, page_offset);
+ skb_reserve(skb, buf_offset);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1499,7 +1018,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
"RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
- rx_info->page = NULL;
+ if (!reuse_rx_buf_page)
+ rx_info->page = NULL;
rx_ring->free_ids[*next_to_clean] = req_id;
*next_to_clean =
@@ -1514,10 +1034,27 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
- ena_unmap_rx_buff(rx_ring, rx_info);
+ /* rx_info->buf_offset includes rx_ring->rx_headroom */
+ buf_offset = rx_info->buf_offset;
+ pkt_offset = buf_offset - rx_ring->rx_headroom;
+ buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
+ page_offset = rx_info->page_offset;
+
+ pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
+
+ reuse_rx_buf_page = !is_xdp_loaded &&
+ ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
+
+ dma_sync_single_for_cpu(rx_ring->dev,
+ pre_reuse_paddr + pkt_offset,
+ len,
+ DMA_FROM_DEVICE);
+
+ if (!reuse_rx_buf_page)
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, ENA_PAGE_SIZE);
+ page_offset + buf_offset, len, buf_len);
} while (1);
@@ -1550,7 +1087,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
@@ -1562,7 +1099,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
@@ -1608,31 +1145,35 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
{
struct ena_rx_buffer *rx_info;
int ret;
+	/* XDP multi-buffer packets are not supported */
+ if (unlikely(num_descs > 1)) {
+ netdev_err_once(rx_ring->adapter->netdev,
+ "xdp: dropped unsupported multi-buffer packets\n");
+ ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+ return ENA_XDP_DROP;
+ }
+
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
xdp_prepare_buff(xdp, page_address(rx_info->page),
- rx_info->page_offset,
+ rx_info->buf_offset,
rx_ring->ena_bufs[0].len, false);
- /* If for some reason we received a bigger packet than
- * we expect, then we simply drop it
- */
- if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
- return XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
/* The xdp program might expand the headers */
- if (ret == XDP_PASS) {
- rx_info->page_offset = xdp->data - xdp->data_hard_start;
+ if (ret == ENA_XDP_PASS) {
+ rx_info->buf_offset = xdp->data - xdp->data_hard_start;
rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
}
return ret;
}
+
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1656,6 +1197,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
+ u8 pkt_offset;
int rc = 0;
int i;
@@ -1665,7 +1207,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
- xdp_verdict = XDP_PASS;
+ xdp_verdict = ENA_XDP_PASS;
skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1682,18 +1224,24 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* First descriptor might have an offset set by the device */
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- rx_info->page_offset += ena_rx_ctx.pkt_offset;
+ pkt_offset = ena_rx_ctx.pkt_offset;
+ rx_info->buf_offset += pkt_offset;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
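+	/* Make the received data visible to the CPU before XDP or skb processing */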
+ dma_sync_single_for_cpu(rx_ring->dev,
+ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ rx_ring->ena_bufs[0].len,
+ DMA_FROM_DEVICE);
+
if (ena_xdp_present_ring(rx_ring))
- xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+ xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
/* allocate skb and fill it */
- if (xdp_verdict == XDP_PASS)
+ if (xdp_verdict == ENA_XDP_PASS)
skb = ena_rx_skb(rx_ring,
rx_ring->ena_bufs,
ena_rx_ctx.descs,
@@ -1711,14 +1259,16 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* Packet was passed for transmission, unmap it
* from RX side.
*/
- if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
- ena_unmap_rx_buff(rx_ring,
- &rx_ring->rx_buffer_info[req_id]);
+ if (xdp_verdict & ENA_XDP_FORWARDED) {
+ ena_unmap_rx_buff_attrs(rx_ring,
+ &rx_ring->rx_buffer_info[req_id],
+ DMA_ATTR_SKIP_CPU_SYNC);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}
- if (xdp_verdict != XDP_PASS) {
+ if (xdp_verdict != ENA_XDP_PASS) {
xdp_flags |= xdp_verdict;
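+	/* Bytes consumed by XDP verdicts still count toward the RX byte total */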
+ total_len += ena_rx_ctx.ena_bufs[0].len;
res_budget--;
continue;
}
@@ -1757,31 +1307,30 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
- if (refill_required > refill_threshold) {
- ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ if (refill_required > refill_threshold)
ena_refill_rx_bufs(rx_ring, refill_required);
- }
- if (xdp_flags & XDP_REDIRECT)
- xdp_do_flush_map();
+ if (xdp_flags & ENA_XDP_REDIRECT)
+ xdp_do_flush();
return work_done;
error:
+ if (xdp_flags & ENA_XDP_REDIRECT)
+ xdp_do_flush();
+
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
- &rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else if (rc == -EFAULT) {
+ ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
}
-
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
return 0;
}
@@ -1811,16 +1360,17 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->rx_stats.bytes,
&dim_sample);
- net_dim(&ena_napi->dim, dim_sample);
+ net_dim(&ena_napi->dim, &dim_sample);
rx_ring->per_napi_packets = 0;
}
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
+ u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = 0;
+
/* Rx ring can be NULL for XDP tx queues, which don't have an
* accompanying rx_ring pair.
*/
@@ -1848,8 +1398,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
int cpu = get_cpu();
int numa_node;
@@ -1858,83 +1408,32 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (likely(tx_ring->cpu == cpu))
goto out;
+ tx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
+
numa_node = cpu_to_node(cpu);
+
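+	/* Skip the device update when the ring already resides on this NUMA node */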
+ if (likely(tx_ring->numa_node == numa_node))
+ goto out;
+
put_cpu();
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- if (rx_ring)
+ tx_ring->numa_node = numa_node;
+ if (rx_ring) {
+ rx_ring->numa_node = numa_node;
ena_com_update_numa_node(rx_ring->ena_com_io_cq,
numa_node);
+ }
}
- tx_ring->cpu = cpu;
- if (rx_ring)
- rx_ring->cpu = cpu;
-
return;
out:
put_cpu();
}
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
-{
- u32 total_done = 0;
- u16 next_to_clean;
- u32 tx_bytes = 0;
- int tx_pkts = 0;
- u16 req_id;
- int rc;
-
- if (unlikely(!xdp_ring))
- return 0;
- next_to_clean = xdp_ring->next_to_clean;
-
- while (tx_pkts < budget) {
- struct ena_tx_buffer *tx_info;
- struct xdp_frame *xdpf;
-
- rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
- &req_id);
- if (rc)
- break;
-
- rc = validate_xdp_req_id(xdp_ring, req_id);
- if (rc)
- break;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- xdpf = tx_info->xdpf;
-
- tx_info->xdpf = NULL;
- tx_info->last_jiffies = 0;
- ena_unmap_tx_buff(xdp_ring, tx_info);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
- xdpf);
-
- tx_bytes += xdpf->len;
- tx_pkts++;
- total_done += tx_info->tx_descs;
-
- xdp_return_frame(xdpf);
- xdp_ring->free_ids[next_to_clean] = req_id;
- next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
- xdp_ring->ring_size);
- }
-
- xdp_ring->next_to_clean = next_to_clean;
- ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d done. total pkts: %d\n",
- xdp_ring->qid, tx_pkts);
-
- return tx_pkts;
-}
-
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
@@ -1987,11 +1486,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
ena_adjust_adaptive_rx_intr_moderation(ena_napi);
+ ena_update_ring_numa_node(tx_ring, rx_ring);
ena_unmask_interrupt(tx_ring, rx_ring);
}
- ena_update_ring_numa_node(tx_ring, rx_ring);
-
ret = rx_work_done;
} else {
ret = budget;
@@ -2075,7 +1573,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -2156,9 +1654,9 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
static int ena_request_io_irq(struct ena_adapter *adapter)
{
u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+ int rc = 0, i, k, irq_idx;
unsigned long flags = 0;
struct ena_irq *irq;
- int rc = 0, i, k;
if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
netif_err(adapter, ifup, adapter->netdev,
@@ -2184,6 +1682,16 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
}
+	/* Now that IO IRQs have been successfully allocated, map them to the
+ * corresponding IO NAPI instance. Note that the mgmnt IRQ does not
+ * have a NAPI, so care must be taken to correctly map IRQs to NAPIs.
+ */
+ for (i = 0; i < io_queue_count; i++) {
+ irq_idx = ENA_IO_IRQ_IDX(i);
+ irq = &adapter->irq_tbl[irq_idx];
+ netif_napi_set_irq(&adapter->ena_napi[i].napi, irq->vector);
+ }
+
return rc;
err:
@@ -2211,16 +1719,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
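+		/* Detach the NAPI instance from the IRQ before the vector is freed */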
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -2252,30 +1757,36 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
netif_napi_del(&adapter->ena_napi[i].napi);
- WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].rx_ring);
}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
int first_index, int count)
{
+ int (*napi_handler)(struct napi_struct *napi, int budget);
int i;
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
+ struct ena_ring *rx_ring, *tx_ring;
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- ENA_NAPI_BUDGET);
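+		/* Clear state left over from any previous queue configuration */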
+ memset(napi, 0, sizeof(*napi));
- if (!ENA_IS_XDP_INDEX(adapter, i)) {
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
- } else {
- napi->xdp_ring = &adapter->tx_ring[i];
- }
+ rx_ring = &adapter->rx_ring[i];
+ tx_ring = &adapter->tx_ring[i];
+
+ napi_handler = ena_io_poll;
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ napi_handler = ena_xdp_io_poll;
+
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
+
+ if (!ENA_IS_XDP_INDEX(adapter, i))
+ napi->rx_ring = rx_ring;
+
+ napi->tx_ring = tx_ring;
napi->qid = i;
}
}
@@ -2284,20 +1795,40 @@ static void ena_napi_disable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_disable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ napi_disable(napi);
+ }
}
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
int first_index,
int count)
{
+ struct napi_struct *napi;
int i;
- for (i = first_index; i < first_index + count; i++)
- napi_enable(&adapter->ena_napi[i].napi);
+ for (i = first_index; i < first_index + count; i++) {
+ napi = &adapter->ena_napi[i].napi;
+ napi_enable(napi);
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* This API is supported for non-XDP queues only */
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, napi);
+ netif_queue_set_napi(adapter->netdev, i,
+ NETDEV_QUEUE_TYPE_TX, napi);
+ }
+ }
}
/* Configure the Rx forwarding */
@@ -2310,8 +1841,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2378,7 +1908,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.msix_vector = msix_vector;
ctx.queue_size = tx_ring->ring_size;
- ctx.numa_node = cpu_to_node(tx_ring->cpu);
+ ctx.numa_node = tx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2403,8 +1933,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
@@ -2446,7 +1976,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = msix_vector;
ctx.queue_size = rx_ring->ring_size;
- ctx.numa_node = cpu_to_node(rx_ring->cpu);
+ ctx.numa_node = rx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2484,12 +2014,15 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
if (rc)
goto create_err;
INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
+
+ ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
}
return 0;
create_err:
while (i--) {
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
}
@@ -2614,7 +2147,7 @@ err_setup_tx:
}
}
-static int ena_up(struct ena_adapter *adapter)
+int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
@@ -2630,6 +2163,12 @@ static int ena_up(struct ena_adapter *adapter)
*/
ena_init_napi_in_range(adapter, 0, io_queue_count);
+ /* Enabling DIM needs to happen before enabling IRQs since DIM
+	 * is run from the napi routine
+ */
+ if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
+ ena_com_enable_adaptive_moderation(adapter->ena_dev);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -2676,11 +2215,11 @@ err_req_irq:
return rc;
}
-static void ena_down(struct ena_adapter *adapter)
+void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2693,8 +2232,6 @@ static void ena_down(struct ena_adapter *adapter)
/* After this point the napi handler won't enable the tx queue */
ena_napi_disable_in_range(adapter, 0, io_queue_count);
- /* After destroy the queue there won't be any new interrupts */
-
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
@@ -2790,11 +2327,13 @@ static int ena_close(struct net_device *netdev)
return 0;
}
-int ena_update_queue_sizes(struct ena_adapter *adapter,
- u32 new_tx_size,
- u32 new_rx_size)
+int ena_update_queue_params(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size,
+ u32 new_llq_header_len)
{
- bool dev_was_up;
+ bool dev_was_up, large_llq_changed = false;
+ int rc = 0;
dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
@@ -2804,7 +2343,39 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
0,
adapter->xdp_num_queues +
adapter->num_io_queues);
- return dev_was_up ? ena_up(adapter) : 0;
+
+ large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
+ ENA_ADMIN_PLACEMENT_POLICY_DEV;
+ large_llq_changed &=
+ new_llq_header_len != adapter->ena_dev->tx_max_header_size;
+
+ /* A check that the configuration is valid is done by the caller */
+ if (large_llq_changed) {
+ adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
+
+ ena_destroy_device(adapter, false);
+ rc = ena_restore_device(adapter);
+ }
+
+ return dev_was_up && !rc ? ena_up(adapter) : rc;
+}
+
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+ struct ena_ring *rx_ring;
+ int i;
+
+ if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+ return -EINVAL;
+
+ adapter->rx_copybreak = rx_copybreak;
+
+ for (i = 0; i < adapter->num_io_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ rx_ring->rx_copybreak = rx_copybreak;
+ }
+
+ return 0;
}
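For readers unfamiliar with the knob: rx_copybreak is the conventional threshold below which a received frame is copied into a freshly allocated skb so the DMA page can be recycled, which is why the setter above bounds it by min(MTU, ENA_PAGE_SIZE). A minimal sketch of the idea, with hypothetical names rather than the driver's actual receive routine:

#include <linux/skbuff.h>

/* Hypothetical RX-path helper: names and flow are illustrative only */
static struct sk_buff *rx_copybreak_skb(struct napi_struct *napi,
					void *buf, unsigned int len,
					unsigned int copybreak)
{
	struct sk_buff *skb;

	if (len > copybreak)
		return NULL;	/* large frame: hand the DMA page to the stack */

	skb = napi_alloc_skb(napi, len);
	if (unlikely(!skb))
		return NULL;

	skb_put_data(skb, buf, len);	/* small frame: copy, recycle the page */
	return skb;
}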
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
@@ -3050,8 +2621,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(rc))
goto error_drop_packet;
- skb_tx_timestamp(skb);
-
next_to_use = tx_ring->next_to_use;
req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
@@ -3073,7 +2642,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
- rc = ena_xmit_common(dev,
+ rc = ena_xmit_common(adapter,
tx_ring,
tx_info,
&ena_tx_ctx,
@@ -3115,6 +2684,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ skb_tx_timestamp(skb);
+
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* trigger the dma engine. ena_ring_tx_doorbell()
* calls a memory barrier inside it.
@@ -3132,26 +2703,11 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in--kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
+ ssize_t ret;
int rc;
/* Allocate only the host info */
@@ -3163,14 +2719,22 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info = ena_dev->host_attr.host_info;
- host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
+ host_info->bdf = pci_dev_id(pdev);
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
- sizeof(host_info->kernel_ver_str) - 1);
+ ret = strscpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "kernel version string will be truncated, status = %zd\n", ret);
+
host_info->os_dist = 0;
- strncpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str) - 1);
+ ret = strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "OS distribution string will be truncated, status = %zd\n", ret);
+
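The strlcpy/strncpy to strscpy conversions above rely on strscpy's contract: it always NUL-terminates and returns the number of bytes copied, or -E2BIG when the source did not fit, which is exactly what the ret < 0 checks detect. A minimal, self-contained illustration:

#include <linux/printk.h>
#include <linux/string.h>

static void strscpy_contract_example(void)
{
	char dst[16];
	ssize_t n;

	n = strscpy(dst, "a source string longer than dst", sizeof(dst));
	if (n == -E2BIG)	/* strscpy() NUL-terminates and reports truncation */
		pr_debug("copy truncated to %zu bytes\n", sizeof(dst) - 1);
}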
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -3182,7 +2746,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
- ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
+ ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -3225,8 +2791,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- netif_warn(adapter, drv, adapter->netdev,
- "Cannot set host attributes\n");
+ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
else
netif_err(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
@@ -3238,24 +2803,12 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-int ena_update_hw_stats(struct ena_adapter *adapter)
-{
- int rc = 0;
-
- rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
- if (rc) {
- dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
- return rc;
- }
-
- return 0;
-}
-
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_ring *rx_ring, *tx_ring;
+ u64 total_xdp_rx_drops = 0;
unsigned int start;
u64 rx_drops;
u64 tx_drops;
@@ -3264,39 +2817,45 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_io_queues; i++) {
- u64 bytes, packets;
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ u64 bytes, packets, xdp_rx_drops;
tx_ring = &adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ start = u64_stats_fetch_begin(&tx_ring->syncp);
packets = tx_ring->tx_stats.cnt;
bytes = tx_ring->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
+ /* In XDP there isn't an RX queue counterpart */
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ continue;
+
rx_ring = &adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
+ } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+ total_xdp_rx_drops += xdp_rx_drops;
}
do {
- start = u64_stats_fetch_begin_irq(&adapter->syncp);
+ start = u64_stats_fetch_begin(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
tx_drops = adapter->dev_stats.tx_drops;
- } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
+ } while (u64_stats_fetch_retry(&adapter->syncp, start));
- stats->rx_dropped = rx_drops;
+ stats->rx_dropped = rx_drops + total_xdp_rx_drops;
stats->tx_dropped = tx_drops;
stats->multicast = 0;
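The u64_stats_fetch_begin()/u64_stats_fetch_retry() pair used above is a seqcount read section: it compiles away on 64-bit and guards against torn 64-bit reads on 32-bit, and the _irq variants were retired tree-wide in favor of these. The generic shape of such a reader, as a sketch with hypothetical counter names:

#include <linux/u64_stats_sync.h>

/* Hypothetical per-ring counters, mirroring the pattern above */
struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read_stats(struct example_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);	/* open read section */
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));	/* retry if a writer raced */
}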
@@ -3317,16 +2876,120 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
- .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
};
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
+
+ /* If this function is called after driver load, the ring sizes have already
+ * been configured. Take it into account when recalculating ring size.
+ */
+ if (adapter->tx_ring->ring_size)
+ tx_queue_size = adapter->tx_ring->ring_size;
+
+ if (adapter->rx_ring->ring_size)
+ rx_queue_size = adapter->rx_ring->ring_size;
+
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
+ max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+ max_queue_ext->max_rx_sq_depth);
+ max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queue_ext->max_tx_sq_depth);
+
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
+ } else {
+ struct ena_admin_queue_feature_desc *max_queues =
+ &get_feat_ctx->max_queues;
+ max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+ max_queues->max_sq_depth);
+ max_tx_queue_size = max_queues->max_cq_depth;
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queues->max_sq_depth);
+
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
+ }
+
+ max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+ max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ /* When forcing large headers, we multiply the entry size by 2, and therefore divide
+ * the queue size by 2, leaving the amount of memory used by the queues unchanged.
+ */
+ if (adapter->large_llq_header_enabled) {
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ max_tx_queue_size /= 2;
+ dev_info(&adapter->pdev->dev,
+ "Forcing large headers and decreasing maximum TX queue size to %d\n",
+ max_tx_queue_size);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+ adapter->large_llq_header_enabled = false;
+ }
+ }
+
+ tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+ max_tx_queue_size);
+ rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+ max_rx_queue_size);
+
+ tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+ rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
+}
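The sizing rule above reduces to: clamp the requested depth into the device limits, then round down to a power of two. Because the bounds are themselves rounded to powers of two first (and ENA_MIN_RING_SIZE is one), the rounded result stays in range. In isolation, as a sketch:

#include <linux/log2.h>
#include <linux/minmax.h>

/* Sketch of the sizing rule, assuming min_size is a power of two */
static u32 pick_ring_size(u32 requested, u32 min_size, u32 max_size)
{
	u32 size;

	max_size = rounddown_pow_of_two(max_size);	/* as done above */
	size = clamp_val(requested, min_size, max_size);

	return rounddown_pow_of_two(size);	/* still >= min_size */
}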
+
static int ena_device_validate_params(struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -3350,13 +3013,30 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
return 0;
}
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_adapter *adapter,
+ struct ena_llq_configurations *llq_config,
+ struct ena_admin_feature_llq_desc *llq)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
- llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
- llq_config->llq_ring_entry_size_value = 128;
+
+ adapter->large_llq_header_supported =
+ !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
+ adapter->large_llq_header_supported &=
+ !!(llq->entry_size_ctrl_supported &
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ adapter->large_llq_header_enabled) {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_config->llq_ring_entry_size_value = 256;
+ } else {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_ring_entry_size_value = 128;
+ }
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3375,6 +3055,13 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
return 0;
}
+ if (!ena_dev->mem_bar) {
+ netdev_err(ena_dev->net_device,
+ "LLQ is advertised as supported but device doesn't expose mem bar\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
if (unlikely(rc)) {
dev_err(&pdev->dev,
@@ -3390,15 +3077,8 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
{
bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
- if (!has_mem_bar) {
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- dev_err(&pdev->dev,
- "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- }
-
+ if (!has_mem_bar)
return 0;
- }
ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
pci_resource_start(pdev, ENA_MEM_BAR),
@@ -3410,10 +3090,12 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
return 0;
}
-static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
struct ena_com_dev_get_features_ctx *get_feat_ctx,
bool *wd_state)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3458,6 +3140,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3498,18 +3182,28 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
- set_default_llq_configurations(&llq_config);
+ set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(dev, "ENA device init failed\n");
+ netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
goto err_admin_init;
}
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
+
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
@@ -3550,32 +3244,33 @@ err_disable_msix:
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ int rc = 0;
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- return;
+ return 0;
netif_carrier_off(netdev);
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
- if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ if (dev_up)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
- ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
@@ -3587,6 +3282,8 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3594,24 +3291,34 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
struct pci_dev *pdev = adapter->pdev;
+ struct ena_ring *txr;
+ int rc, count, i;
bool wd_state;
- int rc;
set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
- rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+ rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
goto err;
}
adapter->wd_state = wd_state;
+ count = adapter->xdp_num_queues + adapter->num_io_queues;
+ for (i = 0 ; i < count; i++) {
+ txr = &adapter->tx_ring[i];
+ txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+ txr->tx_max_header_size = ena_dev->tx_max_header_size;
+ }
+
rc = ena_device_validate_params(adapter, &get_feat_ctx);
if (rc) {
dev_err(&pdev->dev, "Validation of device parameters failed\n");
@@ -3641,8 +3348,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev, "Device reset completed successfully\n");
-
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3652,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3664,14 +3370,19 @@ err:
static void ena_fw_reset_device(struct work_struct *work)
{
+ int rc = 0;
+
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
rtnl_lock();
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+ rc |= ena_destroy_device(adapter, false);
+ rc |= ena_restore_device(adapter);
+ adapter->dev_stats.reset_fail += !!rc;
+
+ dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
rtnl_unlock();
@@ -3694,9 +3405,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
netif_err(adapter, rx_err, adapter->netdev,
"Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
rx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3707,14 +3417,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
unsigned int time_since_last_napi;
unsigned int missing_tx_comp_to;
bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ int napi_scheduled;
u32 missed_tx = 0;
int i, rc = 0;
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
+
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
@@ -3733,9 +3447,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
netif_err(adapter, tx_err, adapter->netdev,
"Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
tx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3743,27 +3455,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
adapter->missing_tx_completion_to);
if (unlikely(is_tx_comp_time_expired)) {
- if (!tx_buf->print_once) {
- time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
- missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
- tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ time_since_last_napi =
+ jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
+
+ if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
+ /* We suspect napi isn't called because the
+ * bottom half is not run. Require a bigger
+ * timeout for these cases
+ */
+ if (!time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))
+ continue;
+
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
- tx_buf->print_once = 1;
missed_tx++;
+
+ if (tx_buf->print_once)
+ continue;
+
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
+ tx_ring->qid, i, time_since_last_napi, napi_scheduled);
+
+ tx_buf->print_once = 1;
}
}
if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
missed_tx,
- adapter->missing_tx_completion_threshold);
- adapter->reset_reason =
- ENA_REGS_RESET_MISS_TX_CMPL;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ adapter->missing_tx_completion_threshold,
+ missing_tx_comp_to);
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Resetting the device\n");
+
+ ena_reset_device(adapter, reset_reason);
rc = -EIO;
}
@@ -3777,10 +3507,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3793,27 +3524,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
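The rewritten loop is a budgeted round-robin over the queue array rather than a linear scan resumed from the last index, so every queue is eventually visited even when the budget is smaller than the queue count. The bare pattern, as an illustrative sketch:

/* Advance a cursor modulo the queue count, spend at most 'budget' checks
 * per invocation, and return the cursor so the next run resumes where
 * this one stopped. Names are illustrative.
 */
static int monitor_round_robin(int last_qid, int queue_count, int budget,
			       bool (*queue_ok)(int qid))
{
	int qid = last_qid;

	while (budget--) {
		qid = (qid + 1) % queue_count;	/* wrap around the array */
		if (!queue_ok(qid))
			break;			/* bail out on error */
	}

	return qid;	/* resume point for the next timer tick */
}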
/* trigger napi schedule after 2 consecutive detections */
@@ -3884,8 +3617,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
"Keep alive watchdog timeout.\n");
ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
}
}
@@ -3896,8 +3628,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
"ENA admin queue is not in running state!\n");
ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
@@ -3950,7 +3681,8 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
+ struct ena_adapter *adapter = timer_container_of(adapter, t,
+ timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -4013,10 +3745,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!max_num_io_queues)) {
- dev_err(&pdev->dev, "The device doesn't have io queues\n");
- return -EFAULT;
- }
return max_num_io_queues;
}
@@ -4101,14 +3829,14 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ if (unlikely(rc)) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
@@ -4136,73 +3864,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
pci_release_selected_regions(pdev, release_bars);
}
-
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
-{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
- u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
- u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
-
- if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
- struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
- max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
- max_queue_ext->max_rx_sq_depth);
- max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- llq->max_llq_depth);
- else
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- max_queue_ext->max_tx_sq_depth);
-
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
- } else {
- struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
- max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
- max_queues->max_sq_depth);
- max_tx_queue_size = max_queues->max_cq_depth;
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- llq->max_llq_depth);
- else
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- max_queues->max_sq_depth);
-
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
- }
-
- max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
- max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
-
- tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
- max_tx_queue_size);
- rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
- max_rx_queue_size);
-
- tx_queue_size = rounddown_pow_of_two(tx_queue_size);
- rx_queue_size = rounddown_pow_of_two(rx_queue_size);
-
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
-
- return 0;
-}
-
/* ena_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ena_pci_tbl
@@ -4215,12 +3876,12 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -4286,23 +3947,39 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+ rc = ena_phc_alloc(adapter);
if (rc) {
- dev_err(&pdev->dev, "ENA device init failed\n");
- if (rc == -ETIME)
- rc = -EPROBE_DEFER;
+ netdev_err(netdev, "ena_phc_alloc failed\n");
goto err_netdev_destroy;
}
+ rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
+ if (rc) {
+ netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
+ goto err_free_phc;
+ }
+
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
- goto err_device_destroy;
+ dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
+ goto err_metrics_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
+ }
+
+ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ dev_err(&pdev->dev, "ENA device init failed\n");
+ if (rc == -ETIME)
+ rc = -EPROBE_DEFER;
+ goto ena_devlink_destroy;
+ }
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
@@ -4311,8 +3988,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx);
- if (rc || !max_num_io_queues) {
+ if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
}
@@ -4321,13 +3997,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
@@ -4351,6 +4020,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Failed to query interrupt moderation feature\n");
goto err_device_destroy;
}
+
ena_init_io_rings(adapter,
0,
adapter->xdp_num_queues +
@@ -4378,10 +4048,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
- if (!ena_update_hw_stats(adapter))
- adapter->eni_stats_supported = true;
- else
- adapter->eni_stats_supported = false;
+ if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
@@ -4393,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4414,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4426,10 +4103,16 @@ err_free_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_worker_destroy:
- del_timer(&adapter->timer_service);
+ timer_delete(&adapter->timer_service);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
+err_metrics_destroy:
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4460,22 +4143,23 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
+ ena_debugfs_terminate(netdev);
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
- del_timer_sync(&adapter->timer_service);
+ timer_delete_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
@@ -4492,6 +4176,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_com_delete_host_info(ena_dev);
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
@@ -4574,13 +4260,19 @@ static struct pci_driver ena_pci_driver = {
static int __init ena_init(void)
{
+ int ret;
+
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
- return pci_register_driver(&ena_pci_driver);
+ ret = pci_register_driver(&ena_pci_driver);
+ if (ret)
+ destroy_workqueue(ena_wq);
+
+ return ret;
}
static void __exit ena_cleanup(void)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c39fc2fa345..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -14,6 +14,9 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <net/xdp.h>
+#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -50,6 +53,8 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_MIN_RING_SIZE (256)
+#define ENA_MIN_RX_BUF_SIZE (2048)
+
#define ENA_MIN_NUM_IO_QUEUES (1)
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
@@ -106,18 +111,7 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
-/* The max MTU size is configured to be the ethernet frame size without
- * the overhead of the ethernet header, which can have a VLAN header, and
- * a frame check sequence (FCS).
- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
- */
-
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-
-#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
- ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+struct ena_phc_info;
struct ena_irq {
irq_handler_t handler;
@@ -134,25 +128,18 @@ struct ena_napi {
struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- struct ena_ring *xdp_ring;
u32 qid;
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ };
/* num of ena desc for this specific skb
* (includes data desc and metadata desc)
*/
@@ -160,16 +147,14 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
- /* XDP buffer structure which is used for sending packets in
- * the xdp queues
- */
- struct xdp_frame *xdpf;
+ /* Total size of all buffers in bytes */
+ u32 total_tx_size;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
/* Used for detect missing tx packets to limit the number of prints */
- u32 print_once;
+ u8 print_once;
/* Save the last jiffies to detect missing tx packets
*
* sets to non zero value on ena_start_xmit and set to zero on
@@ -186,7 +171,9 @@ struct ena_tx_buffer {
struct ena_rx_buffer {
struct sk_buff *skb;
struct page *page;
+ dma_addr_t dma_addr;
u32 page_offset;
+ u32 buf_offset;
struct ena_com_buf ena_buf;
} ____cacheline_aligned;
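Folding skb and xdpf into a union is safe because a given TX ring services either the netdev stack or XDP, never both, so only one member is ever live. A hypothetical cleanup helper (not in the driver) makes the invariant explicit:

/* Hypothetical helper: the ring type decides which union member is live */
static void ena_tx_buf_release(struct ena_adapter *adapter,
			       struct ena_ring *ring,
			       struct ena_tx_buffer *info)
{
	if (ENA_IS_XDP_INDEX(adapter, ring->qid))
		xdp_return_frame(info->xdpf);	/* XDP queue: xdpf is live */
	else
		dev_kfree_skb_any(info->skb);	/* netdev queue: skb is live */
}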
@@ -215,7 +202,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 csum_good;
u64 refil_partial;
- u64 bad_csum;
+ u64 csum_bad;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
@@ -273,9 +260,11 @@ struct ena_ring {
bool disable_meta_caching;
u16 no_interrupt_event_cnt;
- /* cpu for TPH */
+ /* cpu and NUMA for TPH */
int cpu;
- /* number of tx/rx_buffer_info's entries */
+ int numa_node;
+
+ /* number of tx/rx_buffer_info's entries */
int ring_size;
enum ena_admin_placement_policy_type tx_mem_queue_type;
@@ -304,6 +293,7 @@ struct ena_stats_dev {
u64 admin_q_pause;
u64 rx_drops;
u64 tx_drops;
+ u64 reset_fail;
};
enum ena_flags_t {
@@ -343,6 +333,14 @@ struct ena_adapter {
u32 msg_enable;
+ /* large_llq_header_enabled is used for two purposes:
+ * 1. Indicates that large LLQ has been requested.
+ * 2. Indicates whether large LLQ is set or not after device
+ * initialization / configuration.
+ */
+ bool large_llq_header_enabled;
+ bool large_llq_header_supported;
+
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
@@ -353,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -378,7 +378,7 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
- bool eni_stats_supported;
+ struct ena_admin_ena_srd_info ena_srd_info;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -388,6 +388,13 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -396,48 +403,67 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
-int ena_update_hw_stats(struct ena_adapter *adapter);
-int ena_update_queue_sizes(struct ena_adapter *adapter,
- u32 new_tx_size,
- u32 new_rx_size);
+int ena_update_queue_params(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size,
+ u32 new_llq_header_len);
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
-int ena_get_sset_count(struct net_device *netdev, int sset);
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
-enum ena_xdp_errors_t {
- ENA_XDP_ALLOWED = 0,
- ENA_XDP_CURRENT_MTU_TOO_LARGE,
- ENA_XDP_NO_ENOUGH_QUEUES,
-};
+int ena_get_sset_count(struct net_device *netdev, int sset);
-static inline bool ena_xdp_present(struct ena_adapter *adapter)
+static inline void ena_reset_device(struct ena_adapter *adapter,
+ enum ena_regs_reset_reason_types reset_reason)
{
- return !!adapter->xdp_bpf_prog;
+ adapter->reset_reason = reset_reason;
+ /* Make sure reset reason is set before triggering the reset */
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
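The smp_mb__before_atomic() orders the reset_reason store before the flag set, so any consumer that observes the bit also observes a valid reason. A sketch of the consuming side of that pairing (the driver's actual reset worker uses test_bit() under rtnl; this is illustrative):

static void reset_consumer_example(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		/* test_and_clear_bit() is a fully ordered RMW, so this read
		 * cannot observe a reset_reason older than the bit it saw.
		 */
		pr_info("reset requested, reason %d\n", adapter->reset_reason);
	}
}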
-static inline bool ena_xdp_present_ring(struct ena_ring *ring)
-{
- return !!ring->xdp_bpf_prog;
-}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp);
-static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static inline void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
{
- return 2 * queues <= adapter->max_num_io_queues;
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
}
-static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
- enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
-
- if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
- rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
- else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
- rc = ENA_XDP_NO_ENOUGH_QUEUES;
-
- return rc;
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes);
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+void ena_down(struct ena_adapter *adapter);
+int ena_up(struct ena_adapter *adapter);
+void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/Disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is activated if ptp clock is registered in the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During reset flow, PHC must stay registered
+ * to keep the kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
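The index returned here is what user space sees (for example via ethtool -T); the clock itself is read through the dynamic POSIX clock of the matching /dev/ptpN node. An illustrative user-space reader, assuming the device registered as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

/* Dynamic POSIX clocks are addressed by folding the fd into a clockid */
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("phc time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}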
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 1e007a41a525..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -21,6 +21,8 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
+ ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
};
/* ena_registers offsets */
@@ -51,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* 100 base */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -127,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
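The new PHC definitions suggest the doorbell is a single register write carrying a 16-bit request id; the actual write lives in the shared ena_com code, which is not part of this hunk. A hypothetical composition using only the definitions added above:

#include <linux/io.h>

/* Hypothetical doorbell write, for illustration only */
static void phc_ring_doorbell_example(u8 __iomem *reg_base, u16 req_id)
{
	u32 db_val = req_id & ENA_REGS_PHC_DB_REQ_ID_MASK;

	writel(db_val, reg_base + ENA_REGS_PHC_DB_OFF);
}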
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
new file mode 100644
index 000000000000..5b175e7e92a1
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "ena_xdp.h"
+
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
+}
+
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_adapter *adapter = tx_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
+ u32 size;
+
+ tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
+ size = tx_info->xdpf->len;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, tx_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
+
+ if (size > 0) {
+ dma = dma_map_single(tx_ring->dev,
+ data,
+ size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 0;
+
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
+
+ return 0;
+
+error_report_dma_error:
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+
+ return -EINVAL;
+}
+
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags)
+{
+ struct ena_com_tx_ctx ena_tx_ctx = {};
+ struct ena_tx_buffer *tx_info;
+ u16 next_to_use, req_id;
+ int rc;
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+ goto err;
+
+ ena_tx_ctx.req_id = req_id;
+
+ rc = ena_xmit_common(adapter,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdpf->len);
+ if (rc)
+ goto error_unmap_dma;
+
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * calls a memory barrier inside it.
+ */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ return rc;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(tx_ring, tx_info);
+err:
+ tx_info->xdpf = NULL;
+
+ return rc;
+}
+
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_ring *tx_ring;
+ int qid, i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ tx_ring = &adapter->tx_ring[qid];
+
+ /* Other CPU ids might try to send through this queue */
+ spin_lock(&tx_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
+ break;
+ nxmit++;
+ }
+
+ /* Ring doorbell to make device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ spin_unlock(&tx_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return nxmit;
+}
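Frames reach ena_xdp_xmit() either through the .ndo_xdp_xmit path shown here or via XDP_REDIRECT from a program running on another queue or device. A minimal redirect program that would feed it, purely illustrative (devmap key 0 is assumed to hold the target ifindex, filled in by the loader):

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_example(struct xdp_md *ctx)
{
	/* Redirect every frame to the device stored at key 0;
	 * fall back to XDP_DROP if the lookup fails.
	 */
	return bpf_redirect_map(&tx_port, 0, XDP_DROP);
}

char _license[] SEC("license") = "GPL";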
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both the kernel and the BPF program to learn
+ * more about the RX queue a given XDP frame arrived on.
+ */
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
+
+	netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d\n",
+ rx_ring->qid);
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
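+	/* MEM_TYPE_PAGE_SHARED registers a page-refcount based memory model:
+	 * redirected frames from this ring are released with page_frag_free()
+	 * rather than being recycled through a page pool.
+	 */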
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev,
+ "Unregistering RX info for queue %d",
+ rx_ring->qid);
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
+{
+ struct bpf_prog *old_bpf_prog;
+ struct ena_ring *rx_ring;
+	int i;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
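+		/* A newly attached program may grow headers via
+		 * bpf_xdp_adjust_head(), so reserve the full
+		 * XDP_PACKET_HEADROOM; with no program the default skb
+		 * padding is enough.
+		 */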
+		if (!old_bpf_prog && prog)
+			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+		else if (old_bpf_prog && !prog)
+			rx_ring->rx_headroom = NET_SKB_PAD;
+ }
+}
+
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
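+/* Attaching the first XDP program (or removing the last one) changes the
+ * queue layout: the interface is brought down, the dedicated XDP Tx rings
+ * are (de)initialized, and the interface is brought back up if it was
+ * running. Swapping one program for another only exchanges the pointers.
+ */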
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n");
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ xdp_features_set_redirect_target(netdev, false);
+ } else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
+ netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n");
+
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "XDP program is set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+		netif_err(adapter, drv, adapter->netdev,
+			  "Failed to set XDP program: the current MTU (%d) is larger than the maximum allowed MTU (%lu) while XDP is on",
+			  netdev->mtu, ENA_XDP_MAX_MTU);
+		NL_SET_ERR_MSG_MOD(bpf->extack,
+				   "Failed to set XDP program: the current MTU is larger than the maximum allowed MTU. Check dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+		netif_err(adapter, drv, adapter->netdev,
+			  "Failed to set XDP program: the Rx/Tx channel count must be at most half of the maximum allowed channel count. Current queue count: %d, maximum queue count: %d\n",
+			  adapter->num_io_queues, adapter->max_num_io_queues);
+		NL_SET_ERR_MSG_MOD(bpf->extack,
+				   "Failed to set XDP program: not enough space to allocate XDP queues. Check dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main XDP callback. The kernel uses it to set or unset an
+ * XDP program and to query the id of the currently attached program.
+ */
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!tx_ring))
+ return 0;
+ next_to_clean = tx_ring->next_to_clean;
+
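+	/* Each completion yields a req_id indexing tx_buffer_info: unmap the
+	 * matching xdp_frame, return it, and recycle the id into free_ids for
+	 * the xmit path to reuse.
+	 */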
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL, true);
+ break;
+ }
+
+ /* validate that the request id points to a valid xdp_frame */
+ rc = validate_xdp_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ tx_info->last_jiffies = 0;
+
+ xdpf = tx_info->xdpf;
+ tx_info->xdpf = NULL;
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ xdp_return_frame(xdpf);
+
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+ tx_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
+/* This is the XDP napi callback. XDP queues use a napi callback separate
+ * from the one used by Rx/Tx queues.
+ */
+int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring;
+ u32 work_done;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ work_done = ena_clean_xdp_irq(tx_ring, budget);
+
+	/* If the device is about to reset or go down, avoid unmasking
+	 * the interrupt and return 0 so NAPI won't reschedule
+	 */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (budget > work_done) {
+ ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
+ &tx_ring->syncp);
+ if (napi_complete_done(napi, work_done))
+ ena_unmask_interrupt(tx_ring, NULL);
+
+ ena_update_ring_numa_node(tx_ring, NULL);
+ ret = work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
new file mode 100644
index 000000000000..cfd82728486a
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_XDP_H
+#define ENA_XDP_H
+
+#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
+
+/* The maximum MTU is the largest frame that fits in the buffer shared
+ * with the device (ENA_PAGE_SIZE) after subtracting the Ethernet header
+ * (which may carry a VLAN tag), the frame check sequence (FCS), the XDP
+ * headroom and the tailroom reserved for struct skb_shared_info.
+ */
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
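+
+/* For illustration only: assuming 4 KiB pages, the usual 256 byte
+ * XDP_PACKET_HEADROOM and a 320 byte aligned skb_shared_info, this works
+ * out to 4096 - 14 - 4 - 4 - 256 - 320 = 3498 bytes.
+ */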
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
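+
+/* The verdicts are bit flags rather than plain enumerators so the Rx path
+ * can test both forwarding outcomes with a single mask check, e.g.
+ * (verdict & ENA_XDP_FORWARDED) covers TX and REDIRECT alike.
+ */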
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count);
+int ena_xdp_io_poll(struct napi_struct *napi, int budget);
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags);
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
+
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
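+/* Each channel needs a dedicated XDP Tx ring on top of its regular rings,
+ * so a queue count is legal only if twice that count fits in the device
+ * maximum; e.g. (illustrative) max_num_io_queues == 32 allows at most 16
+ * channels while XDP is in use.
+ */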
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
+static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ u32 verdict = ENA_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
+ struct xdp_frame *xdpf;
+ u64 *xdp_stat;
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ }
+
+ /* Find xmit queue */
+ xdp_ring = rx_ring->xdp_ring;
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
+ XDP_XMIT_FLUSH))
+ xdp_return_frame(xdpf);
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
+ break;
+ }
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_DROP:
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_PASS:
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
+ }
+
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+
+ return verdict;
+}
+#endif /* ENA_XDP_H */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ef512cf89abf..27792a52b6cf 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev)
EXPORT_SYMBOL_GPL(lance_poll);
#endif
+MODULE_DESCRIPTION("LANCE Ethernet IC generic routines");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 899c8a2a34b6..d54dca3074eb 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -56,7 +56,7 @@ config LANCE
config PCNET32
tristate "AMD PCnet32 PCI support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -122,7 +122,7 @@ config MVME147_NET
config PCMCIA_NMCLAN
tristate "New Media PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a New Media Ethernet or LiveWire
PCMCIA (PC-card) Ethernet card to your computer.
@@ -130,16 +130,6 @@ config PCMCIA_NMCLAN
To compile this driver as a module, choose M here: the module will be
called nmclan_cs. If unsure, say N.
-config NI65
- tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM && !PPC32
- select NETDEV_LEGACY_INIT
- help
- If you have a network (Ethernet) card of this type, say Y here.
-
- To compile this driver as a module, choose M here. The module
- will be called ni65.
-
config SUN3LANCE
tristate "Sun3/Sun3x on-board LANCE support"
depends on (SUN3 || SUN3X)
@@ -175,6 +165,7 @@ config AMD_XGBE
select CRC32
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
+ select NET_SELFTESTS
help
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
@@ -196,4 +187,18 @@ config AMD_XGBE_HAVE_ECC
bool
default n
+config PDS_CORE
+ tristate "AMD/Pensando Data Systems Core Device Support"
+ depends on 64BIT && PCI
+ select AUXILIARY_BUS
+ select NET_DEVLINK
+ help
+	  This enables support for the AMD/Pensando Core device family of
+	  adapters. More information on this driver can be found in
+ <file:Documentation/networking/device_drivers/ethernet/amd/pds_core.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pds_core.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 0d2f478b49a5..2dcfb84731e1 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -13,8 +13,8 @@ obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
-obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
obj-$(CONFIG_AMD_XGBE) += xgbe/
+obj-$(CONFIG_PDS_CORE) += pds_core/
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 2f808dbc8b0e..ce9445425045 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -486,7 +486,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
ll->rap = LE_CSR0;
@@ -636,7 +636,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
lance_set_multicast(lp->dev);
}
@@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -694,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
@@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
+ addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0x10;
+ addr[1] = 0x80;
+ addr[2] = 0x10;
} else { /* Ameristar */
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x9f;
+ addr[1] = 0x00;
+ addr[2] = 0x9f;
}
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
@@ -779,4 +781,5 @@ static void __exit a2065_cleanup_module(void)
module_init(a2065_init_module);
module_exit(a2065_cleanup_module);
+MODULE_DESCRIPTION("Commodore A2065 Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9421afb950f7..76e8c13d5985 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismatch in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1174,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
/* Delete ipg timer */
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1521,9 +1520,9 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev)) {
/* new_mtu will be used
- * when device starts netxt time
+ * when device starts next time
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1532,7 +1531,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
/* stop the chip */
writel(RUN, lp->mmio + CMD0);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
@@ -1599,7 +1598,7 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */
spin_lock_irq(&lp->lock);
if (lp->options & OPTION_DYN_IPG_ENABLE)
- del_timer_sync(&lp->ipg_data.ipg_timer);
+ timer_delete_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
@@ -1642,7 +1641,8 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct amd8111e_priv *lp = timer_container_of(lp, t,
+ ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
@@ -1797,7 +1797,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev;
- lp->pm_cap = pdev->pm_cap;
spin_lock_init(&lp->lock);
@@ -1828,11 +1827,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU;
- netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+ netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
-#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..e4ee4c28800c 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -550,7 +550,6 @@ typedef enum {
/* Driver definitions */
-#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
#define MAX_UNITS 8 /* Maximum number of devices possible */
@@ -600,7 +599,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
@@ -764,7 +763,6 @@ struct amd8111e_priv{
u32 ext_phy_id;
struct amd8111e_link_config link_config;
- int pm_cap;
struct net_device *next;
int mii;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 5e0f645f5bde..fa201da567ed 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev)
/* Set the Ethernet Hardware Address */
lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[0];
lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[1];
lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[2];
/* Set the Init Block Mode */
lance->RAP = CSR15; /* Mode Register */
@@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -730,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
@@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x60;
+ addr[2] = 0x30;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
@@ -788,4 +790,5 @@ static void __exit ariadne_cleanup_module(void)
module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);
+MODULE_DESCRIPTION("Ariadne Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9c7d9690d00c..8c8cc7d0f42d 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -79,6 +79,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_DESCRIPTION("Atari LANCE Ethernet driver");
MODULE_LICENSE("GPL");
/* Print debug messages on probing? */
@@ -367,7 +368,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
}
-struct net_device * __init atarilance_probe(void)
+static struct net_device * __init atarilance_probe(void)
{
int i;
static int found;
@@ -471,6 +472,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
int i;
static int did_version;
unsigned short save1, save2;
+ u8 addr[ETH_ALEN];
PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
(long)memaddr, (long)ioaddr ));
@@ -580,19 +582,21 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
- lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ case NEW_RIEBL:
+ lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
- dev->dev_addr[i] =
+ addr[i] =
((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ eth_hw_addr_set(dev, addr);
i = IO->mem;
break;
}
@@ -821,7 +825,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb( skb );
+ dev_consume_skb_irq(skb);
lp->cur_tx++;
while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
lp->cur_tx -= TX_RING_SIZE;
@@ -851,7 +855,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -992,7 +996,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c6f003975621..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
pr_cont("\n");
}
}
- prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
wmb(); /* drain writebuffer */
@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_packets++;
ps->tx_bytes += ptxd->len;
- ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0);
+ aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
- pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
+ pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
pDB++;
}
aup->pDBfree = pDBfree;
@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->rx_db_inuse[i] = pDB;
}
@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
}
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
@@ -1310,7 +1310,7 @@ err_remap2:
iounmap(aup->mac);
err_remap1:
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1323,7 +1323,7 @@ out:
return err;
}
-static int au1000_remove(struct platform_device *pdev)
+static void au1000_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1000_private *aup = netdev_priv(dev);
@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
@@ -1359,8 +1359,6 @@ static int au1000_remove(struct platform_device *pdev)
release_mem_region(macen->start, resource_size(macen));
free_netdev(dev);
-
- return 0;
}
static struct platform_driver au1000_eth_driver = {
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index e3a3ed29db61..2489c2f4fd8a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,8 +106,8 @@ struct au1000_private {
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
void __iomem *macdma; /* base of MAC DMA port */
- u32 vaddr; /* virtual address of rx/tx buffers */
- dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+ void *vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..8d05a0c5f2d5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -842,7 +842,7 @@ static int lance_close(struct net_device *dev)
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
@@ -1004,7 +1004,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6784f8748638..df42294530cb 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
+ u8 addr[ETH_ALEN];
int i;
/* reset the board */
@@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
/* The NVRAM holds our ethernet address, one nibble per byte,
* at bytes NVRAMOFF+1,3,5,7,9...
*/
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
}
+ eth_hw_addr_set(dev, addr);
lp = netdev_priv(dev);
lp->lance.name = d->name;
@@ -232,4 +234,5 @@ static void __exit hplance_cleanup_module(void)
module_init(hplance_init_module);
module_exit(hplance_cleanup_module);
+MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 945bf1d87507..b1e6620ad41d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
+#include <net/Space.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -384,6 +385,7 @@ static void __exit lance_cleanup_module(void)
}
module_exit(lance_cleanup_module);
#endif /* MODULE */
+MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver");
MODULE_LICENSE("GPL");
@@ -480,6 +482,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
+ u8 addr[ETH_ALEN];
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -541,7 +544,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
+ addr[i] = inb(ioaddr + i);
+ eth_hw_addr_set(dev, addr);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
@@ -878,7 +882,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -999,7 +1003,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
lp->tx_ring[entry].base =
((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
- dev_kfree_skb(skb);
+ dev_consume_skb_irq(skb);
} else {
lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
@@ -1184,7 +1188,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index da97fccea9ea..f19b04b92fa9 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void)
static int called;
static const char name[] = "MVME147 LANCE";
struct m147lance_private *lp;
+ u8 macaddr[ETH_ALEN];
u_long *addr;
u_long address;
int err;
@@ -93,19 +94,16 @@ static struct net_device * __init mvme147lance_probe(void)
addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0] = 0x08;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x3e;
+ macaddr[0] = 0x08;
+ macaddr[1] = 0x00;
+ macaddr[2] = 0x3e;
address = address >> 8;
- dev->dev_addr[5] = address&0xff;
+ macaddr[5] = address&0xff;
address = address >> 8;
- dev->dev_addr[4] = address&0xff;
+ macaddr[4] = address&0xff;
address = address >> 8;
- dev->dev_addr[3] = address&0xff;
-
- printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
- dev->name, dev->base_addr, MVME147_LANCE_IRQ,
- dev->dev_addr);
+ macaddr[3] = address&0xff;
+ eth_hw_addr_set(dev, macaddr);
lp = netdev_priv(dev);
lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
@@ -136,6 +134,9 @@ static struct net_device * __init mvme147lance_probe(void)
return ERR_PTR(err);
}
+ netdev_info(dev, "MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+ dev->base_addr, MVME147_LANCE_IRQ, dev->dev_addr);
+
return dev;
}
@@ -176,6 +177,7 @@ static int m147lance_close(struct net_device *dev)
return 0;
}
+MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver");
MODULE_LICENSE("GPL");
static struct net_device *dev_mvme147_lance;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
deleted file mode 100644
index 032e8922b482..000000000000
--- a/drivers/net/ethernet/amd/ni65.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-/*
- * ni6510 (am7990 'lance' chip) driver for Linux-net-3
- * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
- * copyrights (c) 1994,1995,1996 by M.Hipp
- *
- * This driver can handle the old ni6510 board and the newer ni6510
- * EtherBlaster. (probably it also works with every full NE2100
- * compatible card)
- *
- * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers the Linux-kernel.
- *
- * comments/bugs/suggestions can be sent to:
- * Michael Hipp
- * email: hippm@informatik.uni-tuebingen.de
- *
- * sources:
- * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
- * and from the original drivers by D.Becker
- *
- * known problems:
- * - on some PCI boards (including my own) the card/board/ISA-bridge has
- * problems with bus master DMA. This results in lotsa overruns.
- * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
- * the XMT and RCV_VIA_SKB option .. this reduces driver performance.
- * Or just play with your BIOS options to optimize ISA-DMA access.
- * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE
- * defines -> please report me your experience then
- * - Harald reported for ASUS SP3G mainboards, that you should use
- * the 'optimal settings' from the user's manual on page 3-12!
- *
- * credits:
- * thanx to Jason Sullivan for sending me a ni6510 card!
- * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
- *
- * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
- * average: FTP -> 8384421 bytes received in 8.5 seconds
- * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
- * peak: FTP -> 8384421 bytes received in 7.5 seconds
- * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
- */
-
-/*
- * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
- * 96.Sept.29: virt_to_bus stuff added for new memory modell
- * 96.April.29: Added Harald Koenig's Patches (MH)
- * 96.April.13: enhanced error handling .. more tests (MH)
- * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
- * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
- * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
- * hopefully no more 16MB limit
- *
- * 95.Nov.18: multicast tweaked (AC).
- *
- * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
- *
- * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "ni65.h"
-
-/*
- * the current setting allows an acceptable performance
- * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
- * the header of this file
- * 'invert' the defines for max. performance. This may cause DMA problems
- * on some boards (e.g on my ASUS SP3G)
- */
-#undef XMT_VIA_SKB
-#undef RCV_VIA_SKB
-#define RCV_PARANOIA_CHECK
-
-#define MID_PERFORMANCE
-
-#if defined( LOW_PERFORMANCE )
- static int isa0=7,isa1=7,csr80=0x0c10;
-#elif defined( MID_PERFORMANCE )
- static int isa0=5,isa1=5,csr80=0x2810;
-#else /* high performance */
- static int isa0=4,isa1=4,csr80=0x0017;
-#endif
-
-/*
- * a few card/vendor specific defines
- */
-#define NI65_ID0 0x00
-#define NI65_ID1 0x55
-#define NI65_EB_ID0 0x52
-#define NI65_EB_ID1 0x44
-#define NE2100_ID0 0x57
-#define NE2100_ID1 0x57
-
-#define PORT p->cmdr_addr
-
-/*
- * buffer configuration
- */
-#if 1
-#define RMDNUM 16
-#define RMDNUMMASK 0x80000000
-#else
-#define RMDNUM 8
-#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
-#endif
-
-#if 0
-#define TMDNUM 1
-#define TMDNUMMASK 0x00000000
-#else
-#define TMDNUM 4
-#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
-#endif
-
-/* slightly oversized */
-#define R_BUF_SIZE 1544
-#define T_BUF_SIZE 1544
-
-/*
- * lance register defines
- */
-#define L_DATAREG 0x00
-#define L_ADDRREG 0x02
-#define L_RESET 0x04
-#define L_CONFIG 0x05
-#define L_BUSIF 0x06
-
-/*
- * to access the lance/am7990-regs, you have to write
- * reg-number into L_ADDRREG, then you can access it using L_DATAREG
- */
-#define CSR0 0x00
-#define CSR1 0x01
-#define CSR2 0x02
-#define CSR3 0x03
-
-#define INIT_RING_BEFORE_START 0x1
-#define FULL_RESET_ON_ERROR 0x2
-
-#if 0
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
- outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
- inw(PORT+L_DATAREG))
-#if 0
-#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#else
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-#else
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-
-static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
-
-static struct card {
- unsigned char id0,id1;
- short id_offset;
- short total_size;
- short cmd_offset;
- short addr_offset;
- unsigned char *vendor_id;
- char *cardname;
- unsigned long config;
-} cards[] = {
- {
- .id0 = NI65_ID0,
- .id1 = NI65_ID1,
- .id_offset = 0x0e,
- .total_size = 0x10,
- .cmd_offset = 0x0,
- .addr_offset = 0x8,
- .vendor_id = ni_vendor,
- .cardname = "ni6510",
- .config = 0x1,
- },
- {
- .id0 = NI65_EB_ID0,
- .id1 = NI65_EB_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = ni_vendor,
- .cardname = "ni6510 EtherBlaster",
- .config = 0x2,
- },
- {
- .id0 = NE2100_ID0,
- .id1 = NE2100_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = NULL,
- .cardname = "generic NE2100",
- .config = 0x0,
- },
-};
-#define NUM_CARDS 3
-
-struct priv
-{
- struct rmd rmdhead[RMDNUM];
- struct tmd tmdhead[TMDNUM];
- struct init_block ib;
- int rmdnum;
- int tmdnum,tmdlast;
-#ifdef RCV_VIA_SKB
- struct sk_buff *recv_skb[RMDNUM];
-#else
- void *recvbounce[RMDNUM];
-#endif
-#ifdef XMT_VIA_SKB
- struct sk_buff *tmd_skb[TMDNUM];
-#endif
- void *tmdbounce[TMDNUM];
- int tmdbouncenum;
- int lock,xmit_queued;
-
- void *self;
- int cmdr_addr;
- int cardno;
- int features;
- spinlock_t ring_lock;
-};
-
-static int ni65_probe1(struct net_device *dev,int);
-static irqreturn_t ni65_interrupt(int irq, void * dev_id);
-static void ni65_recv_intr(struct net_device *dev,int);
-static void ni65_xmit_intr(struct net_device *dev,int);
-static int ni65_open(struct net_device *dev);
-static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
-static int ni65_close(struct net_device *dev);
-static int ni65_alloc_buffer(struct net_device *dev);
-static void ni65_free_buffer(struct priv *p);
-static void set_multicast_list(struct net_device *dev);
-
-static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
-static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
-
-static int debuglevel = 1;
-
-/*
- * set 'performance' registers .. we must STOP lance for that
- */
-static void ni65_set_performance(struct priv *p)
-{
- writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
-
- if( !(cards[p->cardno].config & 0x02) )
- return;
-
- outw(80,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) != 80)
- return;
-
- writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
- outw(0,PORT+L_ADDRREG);
- outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
- outw(1,PORT+L_ADDRREG);
- outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
-
- outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
-}
-
-/*
- * open interface (up)
- */
-static int ni65_open(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- int irqval = request_irq(dev->irq, ni65_interrupt,0,
- cards[p->cardno].cardname,dev);
- if (irqval) {
- printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name,dev->irq, irqval);
- return -EAGAIN;
- }
-
- if(ni65_lance_reinit(dev))
- {
- netif_start_queue(dev);
- return 0;
- }
- else
- {
- free_irq(dev->irq,dev);
- return -EAGAIN;
- }
-}
-
-/*
- * close interface (down)
- */
-static int ni65_close(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
-
-#ifdef XMT_VIA_SKB
- {
- int i;
- for(i=0;i<TMDNUM;i++)
- {
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
- }
- }
-#endif
- free_irq(dev->irq,dev);
- return 0;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- disable_dma(dev->dma);
- free_dma(dev->dma);
- release_region(dev->base_addr, cards[p->cardno].total_size);
- ni65_free_buffer(p);
-}
-
-/* set: io,irq,dma or set it when calling insmod */
-static int irq;
-static int io;
-static int dma;
-
-/*
- * Probe The Card (not the lance-chip)
- */
-struct net_device * __init ni65_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(0);
- static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
- const int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- dma = dev->dma;
- } else {
- dev->base_addr = io;
- }
-
- if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
- err = ni65_probe1(dev, dev->base_addr);
- } else if (dev->base_addr > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni65_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
-
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni65_netdev_ops = {
- .ndo_open = ni65_open,
- .ndo_stop = ni65_close,
- .ndo_start_xmit = ni65_send_packet,
- .ndo_tx_timeout = ni65_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * this is the real card probe ..
- */
-static int __init ni65_probe1(struct net_device *dev,int ioaddr)
-{
- int i,j;
- struct priv *p;
- unsigned long flags;
-
- dev->irq = irq;
- dev->dma = dma;
-
- for(i=0;i<NUM_CARDS;i++) {
- if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
- continue;
- if(cards[i].id_offset >= 0) {
- if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
- inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
- release_region(ioaddr, cards[i].total_size);
- continue;
- }
- }
- if(cards[i].vendor_id) {
- for(j=0;j<3;j++)
- if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
- release_region(ioaddr, cards[i].total_size);
- }
- break;
- }
- if(i == NUM_CARDS)
- return -ENODEV;
-
- for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
-
- if( (j=ni65_alloc_buffer(dev)) < 0) {
- release_region(ioaddr, cards[i].total_size);
- return j;
- }
- p = dev->ml_priv;
- p->cmdr_addr = ioaddr + cards[i].cmd_offset;
- p->cardno = i;
- spin_lock_init(&p->ring_lock);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (j=readreg(CSR0)) != 0x4) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- outw(88,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) == 88) {
- unsigned long v;
- v = inw(PORT+L_DATAREG);
- v <<= 16;
- outw(89,PORT+L_ADDRREG);
- v |= inw(PORT+L_DATAREG);
- printk("Version %#08lx, ",v);
- p->features = INIT_RING_BEFORE_START;
- }
- else {
- printk("ancient LANCE, ");
- p->features = 0x0;
- }
-
- if(test_bit(0,&cards[i].config)) {
- dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
- dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
- printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
- }
- else {
- if(dev->dma == 0) {
- /* 'stuck test' from lance.c */
- unsigned long dma_channels =
- ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
- | (inb(DMA2_STAT_REG) & 0xf0);
- for(i=1;i<5;i++) {
- int dma = dmatab[i];
- if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
- continue;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- set_dma_mode(dma,DMA_MODE_CASCADE);
- enable_dma(dma);
- release_dma_lock(flags);
-
- ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
-
- flags=claim_dma_lock();
- disable_dma(dma);
- free_dma(dma);
- release_dma_lock(flags);
-
- if(readreg(CSR0) & CSR0_IDON)
- break;
- }
- if(i == 5) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- dev->dma = dmatab[i];
- printk("DMA %d (autodetected), ",dev->dma);
- }
- else
- printk("DMA %d (assigned), ",dev->dma);
-
- if(dev->irq < 2)
- {
- unsigned long irq_mask;
-
- ni65_init_lance(p,dev->dev_addr,0,0);
- irq_mask = probe_irq_on();
- writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
- msleep(20);
- dev->irq = probe_irq_off(irq_mask);
- if(!dev->irq)
- {
- printk("Failed to detect IRQ line!\n");
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- printk("IRQ %d (autodetected).\n",dev->irq);
- }
- else
- printk("IRQ %d (assigned).\n",dev->irq);
- }
-
- if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
- {
- printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- dev->base_addr = ioaddr;
- dev->netdev_ops = &ni65_netdev_ops;
- dev->watchdog_timeo = HZ/2;
-
- return 0; /* everything is OK */
-}
-
-/*
- * set lance register and trigger init
- */
-static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
-{
- int i;
- u32 pib;
-
- writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
-
- for(i=0;i<6;i++)
- p->ib.eaddr[i] = daddr[i];
-
- for(i=0;i<8;i++)
- p->ib.filter[i] = filter;
- p->ib.mode = mode;
-
- p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
- p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
- writereg(0,CSR3); /* busmaster/no word-swap */
- pib = (u32) isa_virt_to_bus(&p->ib);
- writereg(pib & 0xffff,CSR1);
- writereg(pib >> 16,CSR2);
-
- writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
-
- for(i=0;i<32;i++)
- {
- mdelay(4);
- if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
- break; /* init ok ? */
- }
-}
-
-/*
- * allocate memory area and check the 16MB border
- */
-static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
-{
- struct sk_buff *skb=NULL;
- unsigned char *ptr;
- void *ret;
-
- if(type) {
- ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
- if(!skb) {
- printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
- return NULL;
- }
- skb_reserve(skb,2+16);
- skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
- ptr = skb->data;
- }
- else {
- ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
- if(!ret)
- return NULL;
- }
- if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
- printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
- if(type)
- kfree_skb(skb);
- else
- kfree(ptr);
- return NULL;
- }
- return ret;
-}
-
-/*
- * allocate all memory structures .. send/recv buffers etc ...
- */
-static int ni65_alloc_buffer(struct net_device *dev)
-{
- unsigned char *ptr;
- struct priv *p;
- int i;
-
- /*
-	 * we need 8-byte-aligned memory ..
- */
- ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
- if(!ptr)
- return -ENOMEM;
-
- p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
- memset((char *)p, 0, sizeof(struct priv));
- p->self = ptr;
-
- for(i=0;i<TMDNUM;i++)
- {
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = NULL;
-#endif
- p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
- if(!p->tmdbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
- if(!p->recv_skb[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#else
- p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
- if(!p->recvbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#endif
- }
-
- return 0; /* everything is OK */
-}
-
-/*
- * free buffers and private struct
- */
-static void ni65_free_buffer(struct priv *p)
-{
- int i;
-
- if(!p)
- return;
-
- for(i=0;i<TMDNUM;i++) {
- kfree(p->tmdbounce[i]);
-#ifdef XMT_VIA_SKB
- dev_kfree_skb(p->tmd_skb[i]);
-#endif
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- dev_kfree_skb(p->recv_skb[i]);
-#else
- kfree(p->recvbounce[i]);
-#endif
- }
- kfree(p->self);
-}
-
-
-/*
- * stop and (re)start lance .. e.g after an error
- */
-static void ni65_stop_start(struct net_device *dev,struct priv *p)
-{
- int csr0 = CSR0_INEA;
-
- writedatareg(CSR0_STOP);
-
- if(debuglevel > 1)
- printk(KERN_DEBUG "ni65_stop_start\n");
-
- if(p->features & INIT_RING_BEFORE_START) {
- int i;
-#ifdef XMT_VIA_SKB
- struct sk_buff *skb_save[TMDNUM];
-#endif
- unsigned long buffer[TMDNUM];
- short blen[TMDNUM];
-
- if(p->xmit_queued) {
- while(1) {
- if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
- break;
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- break;
- }
- }
-
- for(i=0;i<TMDNUM;i++) {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- skb_save[i] = p->tmd_skb[i];
-#endif
- buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
- blen[i] = tmdp->blen;
- tmdp->u.s.status = 0x0;
- }
-
- for(i=0;i<RMDNUM;i++) {
- struct rmd *rmdp = p->rmdhead + i;
- rmdp->u.s.status = RCV_OWN;
- }
- p->tmdnum = p->xmit_queued = 0;
- writedatareg(CSR0_STRT | csr0);
-
- for(i=0;i<TMDNUM;i++) {
- int num = (i + p->tmdlast) & (TMDNUM-1);
- p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
- p->tmdhead[i].blen = blen[num];
- if(p->tmdhead[i].u.s.status & XMIT_OWN) {
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
- p->xmit_queued = 1;
- writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
- }
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = skb_save[num];
-#endif
- }
- p->rmdnum = p->tmdlast = 0;
- if(!p->lock)
- if (p->tmdnum || !p->xmit_queued)
- netif_wake_queue(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- }
- else
- writedatareg(CSR0_STRT | csr0);
-}
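-
-/*
- * Rough sketch of the restart above (INIT_RING_BEFORE_START chips):
- * the buffer pointers, lengths and pending skbs of the whole TX ring
- * are saved, ownership bits are cleared, the chip is restarted, and
- * the saved descriptors are replayed rotated by tmdlast so queued
- * frames keep their order across the restart.
- */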
-
-/*
- * init lance (write init-values .. init-buffers) (open-helper)
- */
-static int ni65_lance_reinit(struct net_device *dev)
-{
- int i;
- struct priv *p = dev->ml_priv;
- unsigned long flags;
-
- p->lock = 0;
- p->xmit_queued = 0;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
- set_dma_mode(dev->dma,DMA_MODE_CASCADE);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (i=readreg(CSR0) ) != 0x4)
- {
- printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
- cards[p->cardno].cardname,(int) i);
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0;
- }
-
- p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
- for(i=0;i<TMDNUM;i++)
- {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
-#endif
- tmdp->u.buffer = 0x0;
- tmdp->u.s.status = XMIT_START | XMIT_END;
- tmdp->blen = tmdp->status2 = 0;
- }
-
- for(i=0;i<RMDNUM;i++)
- {
- struct rmd *rmdp = p->rmdhead + i;
-#ifdef RCV_VIA_SKB
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
-#else
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
-#endif
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN;
- }
-
- if(dev->flags & IFF_PROMISC)
- ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
- ni65_init_lance(p,dev->dev_addr,0xff,0x0);
- else
- ni65_init_lance(p,dev->dev_addr,0x00,0x00);
-
- /*
- * ni65_set_lance_mem() sets L_ADDRREG to CSR0
- * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
- */
-
- if(inw(PORT+L_DATAREG) & CSR0_IDON) {
- ni65_set_performance(p);
- /* init OK: start lance , enable interrupts */
- writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
- return 1; /* ->OK */
- }
- printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0; /* ->Error */
-}
-
-/*
- * interrupt handler
- */
-static irqreturn_t ni65_interrupt(int irq, void * dev_id)
-{
- int csr0 = 0;
- struct net_device *dev = dev_id;
- struct priv *p;
- int bcnt = 32;
-
- p = dev->ml_priv;
-
- spin_lock(&p->ring_lock);
-
- while(--bcnt) {
- csr0 = inw(PORT+L_DATAREG);
-
-#if 0
- writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
-#else
- writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
-#endif
-
- if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
- break;
-
- if(csr0 & CSR0_RINT) /* RECV-int? */
- ni65_recv_intr(dev,csr0);
- if(csr0 & CSR0_TINT) /* XMIT-int? */
- ni65_xmit_intr(dev,csr0);
-
- if(csr0 & CSR0_ERR)
- {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
- if(csr0 & CSR0_BABL)
- dev->stats.tx_errors++;
- if(csr0 & CSR0_MISS) {
- int i;
- for(i=0;i<RMDNUM;i++)
- printk("%02x ",p->rmdhead[i].u.s.status);
- printk("\n");
- dev->stats.rx_errors++;
- }
- if(csr0 & CSR0_MERR) {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
- ni65_stop_start(dev,p);
- }
- }
- }
-
-#ifdef RCV_PARANOIA_CHECK
-{
- int j;
- for(j=0;j<RMDNUM;j++)
- {
- int i, num2;
- for(i=RMDNUM-1;i>0;i--) {
- num2 = (p->rmdnum + i) & (RMDNUM-1);
- if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
- break;
- }
-
- if(i) {
- int k, num1;
- for(k=0;k<RMDNUM;k++) {
- num1 = (p->rmdnum + k) & (RMDNUM-1);
- if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
- break;
- }
- if(!k)
- break;
-
- if(debuglevel > 0)
- {
- char buf[256],*buf1;
- buf1 = buf;
- for(k=0;k<RMDNUM;k++) {
- sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
- buf1 += 3;
- }
- *buf1 = 0;
- printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
- }
-
- p->rmdnum = num1;
- ni65_recv_intr(dev,csr0);
- if((p->rmdhead[num2].u.s.status & RCV_OWN))
- break; /* ok, we are 'in sync' again */
- }
- else
- break;
- }
-}
-#endif
-
- if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
- printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
- ni65_stop_start(dev,p);
- }
- else
- writedatareg(CSR0_INEA);
-
- spin_unlock(&p->ring_lock);
- return IRQ_HANDLED;
-}
-
-/*
- * We have received an Xmit-Interrupt ..
- * send a new packet if necessary
- */
-static void ni65_xmit_intr(struct net_device *dev,int csr0)
-{
- struct priv *p = dev->ml_priv;
-
- while(p->xmit_queued)
- {
- struct tmd *tmdp = p->tmdhead + p->tmdlast;
- int tmdstat = tmdp->u.s.status;
-
- if(tmdstat & XMIT_OWN)
- break;
-
- if(tmdstat & XMIT_ERR)
- {
-#if 0
- if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
- printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
-#endif
- /* checking some errors */
- if(tmdp->status2 & XMIT_RTRY)
- dev->stats.tx_aborted_errors++;
- if(tmdp->status2 & XMIT_LCAR)
- dev->stats.tx_carrier_errors++;
- if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
- /* this stops the xmitter */
- dev->stats.tx_fifo_errors++;
- if(debuglevel > 0)
- printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
- if(p->features & INIT_RING_BEFORE_START) {
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
- ni65_stop_start(dev,p);
- break; /* no more Xmit processing .. */
- }
- else
- ni65_stop_start(dev,p);
- }
- if(debuglevel > 2)
- printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
- if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
- dev->stats.tx_errors++;
- tmdp->status2 = 0;
- }
- else {
- dev->stats.tx_bytes -= (short)(tmdp->blen);
- dev->stats.tx_packets++;
- }
-
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[p->tmdlast]) {
- dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
- p->tmd_skb[p->tmdlast] = NULL;
- }
-#endif
-
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- p->xmit_queued = 0;
- }
- netif_wake_queue(dev);
-}
-
-/*
- * We have received a packet
- */
-static void ni65_recv_intr(struct net_device *dev,int csr0)
-{
- struct rmd *rmdp;
- int rmdstat,len;
- int cnt=0;
- struct priv *p = dev->ml_priv;
-
- rmdp = p->rmdhead + p->rmdnum;
- while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
- {
- cnt++;
- if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
- {
- if(!(rmdstat & RCV_ERR)) {
- if(rmdstat & RCV_START)
- {
- dev->stats.rx_length_errors++;
- printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
- }
- }
- else {
- if(debuglevel > 2)
- printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
- dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
- if(rmdstat & RCV_FRAM)
- dev->stats.rx_frame_errors++;
- if(rmdstat & RCV_OFLO)
- dev->stats.rx_over_errors++;
- if(rmdstat & RCV_CRC)
- dev->stats.rx_crc_errors++;
- if(rmdstat & RCV_BUF_ERR)
- dev->stats.rx_fifo_errors++;
- }
- if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
- dev->stats.rx_errors++;
- }
- else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
- {
-#ifdef RCV_VIA_SKB
- struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
- if (skb)
- skb_reserve(skb,16);
-#else
- struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
-#endif
- if(skb)
- {
- skb_reserve(skb,2);
-#ifdef RCV_VIA_SKB
- if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
- }
- else {
- struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
- skb_put(skb,R_BUF_SIZE);
- p->recv_skb[p->rmdnum] = skb;
- rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- skb = skb1;
- skb_trim(skb,len);
- }
-#else
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
-#endif
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- }
- else
- {
- printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
- dev->stats.rx_dropped++;
- }
- }
- else {
- printk(KERN_INFO "%s: received runt packet\n",dev->name);
- dev->stats.rx_errors++;
- }
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN; /* change owner */
- p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
- rmdp = p->rmdhead + p->rmdnum;
- }
-}
-
-/*
- * kick xmitter ..
- */
-
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
-{
- int i;
- struct priv *p = dev->ml_priv;
-
- printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
- for(i=0;i<TMDNUM;i++)
- printk("%02x ",p->tmdhead[i].u.s.status);
- printk("\n");
- ni65_lance_reinit(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-/*
- * Send a packet
- */
-
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (test_and_set_bit(0, (void*)&p->lock)) {
- printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
- return NETDEV_TX_BUSY;
- }
-
- {
- short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- struct tmd *tmdp;
- unsigned long flags;
-
-#ifdef XMT_VIA_SKB
- if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
-#endif
-
- skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
- skb->len > T_BUF_SIZE ? T_BUF_SIZE :
- skb->len);
- if (len > skb->len)
- memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
- dev_kfree_skb (skb);
-
- spin_lock_irqsave(&p->ring_lock, flags);
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
- p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
-
-#ifdef XMT_VIA_SKB
- }
- else {
- spin_lock_irqsave(&p->ring_lock, flags);
-
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- p->tmd_skb[p->tmdnum] = skb;
- }
-#endif
- tmdp->blen = -len;
-
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
- writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
-
- p->xmit_queued = 1;
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
-
- if(p->tmdnum != p->tmdlast)
- netif_wake_queue(dev);
-
- p->lock = 0;
-
- spin_unlock_irqrestore(&p->ring_lock, flags);
- }
-
- return NETDEV_TX_OK;
-}
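-
-/*
- * Two TX paths above: when the skb data ends beyond the 16MB ISA
- * limit (or XMT_VIA_SKB is unset) the frame is copied into one of the
- * preallocated low-memory bounce buffers; otherwise the descriptor
- * points at the skb data directly and the skb is parked in tmd_skb[]
- * until the xmit interrupt releases it.
- */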
-
-static void set_multicast_list(struct net_device *dev)
-{
- if(!ni65_lance_reinit(dev))
- printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni65;
-
-module_param_hw(irq, int, irq, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
-MODULE_PARM_DESC(io, "ni6510 I/O base address");
-MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-
-static int __init ni65_init_module(void)
-{
- dev_ni65 = ni65_probe(-1);
- return PTR_ERR_OR_ZERO(dev_ni65);
-}
-module_init(ni65_init_module);
-
-static void __exit ni65_cleanup_module(void)
-{
- unregister_netdev(dev_ni65);
- cleanup_card(dev_ni65);
- free_netdev(dev_ni65);
-}
-module_exit(ni65_cleanup_module);
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
deleted file mode 100644
index e6217e35edf0..000000000000
--- a/drivers/net/ethernet/amd/ni65.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* am7990 (lance) definitions
- *
- * This is an extension to the Linux operating system, and is covered by
- * same GNU General Public License that covers that work.
- *
- * Michael Hipp
- * email: mhipp@student.uni-tuebingen.de
- *
- * sources: (mail me or ask archie if you need them)
- * crynwr-packet-driver
- */
-
-/*
- * Control and Status Register 0 (CSR0) bit definitions
- * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
- *
- */
-
-#define CSR0_ERR 0x8000 /* Error summary (R) */
-#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
-#define CSR0_CERR 0x2000 /* Collision Error (RC) */
-#define CSR0_MISS 0x1000 /* Missed packet (RC) */
-#define CSR0_MERR 0x0800 /* Memory Error (RC) */
-#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
-#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
-#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
-#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
-#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
-#define CSR0_RXON 0x0020 /* Receiver on (R) */
-#define CSR0_TXON 0x0010 /* Transmitter on (R) */
-#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
-#define CSR0_STOP 0x0004 /* Stop (RS) */
-#define CSR0_STRT 0x0002 /* Start (RS) */
-#define CSR0_INIT 0x0001 /* Initialize (RS) */
-
-#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
-/*
- * Initialization Block Mode operation Bit Definitions.
- */
-
-#define M_PROM 0x8000 /* Promiscuous Mode */
-#define M_INTL 0x0040 /* Internal Loopback */
-#define M_DRTY 0x0020 /* Disable Retry */
-#define M_COLL 0x0010 /* Force Collision */
-#define M_DTCR		0x0008	/* Disable Transmit CRC */
-#define M_LOOP 0x0004 /* Loopback */
-#define M_DTX 0x0002 /* Disable the Transmitter */
-#define M_DRX 0x0001 /* Disable the Receiver */
-
-
-/*
- * Receive message descriptor bit definitions.
- */
-
-#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define RCV_ERR 0x40 /* Error Summary */
-#define RCV_FRAM 0x20 /* Framing Error */
-#define RCV_OFLO 0x10 /* Overflow Error */
-#define RCV_CRC 0x08 /* CRC Error */
-#define RCV_BUF_ERR 0x04 /* Buffer Error */
-#define RCV_START 0x02 /* Start of Packet */
-#define RCV_END 0x01 /* End of Packet */
-
-
-/*
- * Transmit message descriptor bit definitions.
- */
-
-#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define XMIT_ERR 0x40 /* Error Summary */
-#define XMIT_RETRY	0x10	/* more than 1 retry needed to Xmit */
-#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
-#define XMIT_DEF 0x04 /* Deferred */
-#define XMIT_START 0x02 /* Start of Packet */
-#define XMIT_END 0x01 /* End of Packet */
-
-/*
- * transmit status (2) (valid if XMIT_ERR == 1)
- */
-
-#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
-#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
-#define XMIT_LCAR 0x0800 /* Loss of Carrier */
-#define XMIT_LCOL 0x1000 /* Late collision */
-#define XMIT_RESERV 0x2000 /* Reserved */
-#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
-#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
-
-struct init_block {
- unsigned short mode;
- unsigned char eaddr[6];
- unsigned char filter[8];
- /* bit 29-31: number of rmd's (power of 2) */
- u32 rrp; /* receive ring pointer (align 8) */
- /* bit 29-31: number of tmd's (power of 2) */
- u32 trp; /* transmit ring pointer (align 8) */
-};
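-
-/*
- * The top three bits of rrp/trp carry log2 of the ring size, so the
- * driver ORs a size mask into the ring address, roughly:
- *	ib.trp = (u32) isa_virt_to_bus(tmdhead) | TMDNUMMASK;
- */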
-
-struct rmd { /* Receive Message Descriptor */
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile short blen;
- volatile unsigned short mlen;
-};
-
-struct tmd {
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile unsigned short blen;
- volatile unsigned short status2;
-};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..37054a670407 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -651,7 +651,7 @@ static int nmclan_config(struct pcmcia_device *link)
} else {
pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
sig[0], sig[1]);
- return -ENODEV;
+ goto failed;
}
}
@@ -760,7 +760,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5c50ff377ff..9eaefa0f5e80 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev)
}
static void pcnet32_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
@@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev,
}
static int pcnet32_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -885,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -916,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -981,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1118,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -1245,7 +1253,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -1877,7 +1885,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* napi.weight is used in both the napi and non-napi cases */
lp->napi.weight = lp->rx_ring_size / 2;
- netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+ netif_napi_add_weight(dev, &lp->napi, pcnet32_poll,
+ lp->rx_ring_size / 2);
if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2013,7 +2022,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2021,7 +2030,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2096,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2261,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2295,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2310,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
@@ -2360,7 +2372,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
@@ -2618,7 +2630,7 @@ static int pcnet32_close(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ timer_delete_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
napi_disable(&lp->napi);
@@ -2893,7 +2905,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
struct net_device *dev = lp->dev;
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/pds_core/Makefile b/drivers/net/ethernet/amd/pds_core/Makefile
new file mode 100644
index 000000000000..8239742e681f
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2023 Advanced Micro Devices, Inc.
+
+obj-$(CONFIG_PDS_CORE) := pds_core.o
+
+pds_core-y := main.o \
+ devlink.o \
+ auxbus.o \
+ dev.o \
+ adminq.o \
+ core.o \
+ debugfs.o \
+ fw.o
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
new file mode 100644
index 000000000000..097bb092bdb8
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/dynamic_debug.h>
+
+#include "core.h"
+
+static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+{
+ union pds_core_notifyq_comp *comp;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_cq_info *cq_info;
+ int nq_work = 0;
+ u64 eid;
+
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+ while (eid > pdsc->last_eid) {
+ u16 ecode = le16_to_cpu(comp->event.ecode);
+
+ switch (ecode) {
+ case PDS_EVENT_LINK_CHANGE:
+ dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
+ break;
+
+ case PDS_EVENT_RESET:
+ dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_RESET, comp);
+ break;
+
+ case PDS_EVENT_XCVR:
+ dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+
+ default:
+ dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+ }
+
+ pdsc->last_eid = eid;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+
+ nq_work++;
+ }
+
+ qcq->accum_work += nq_work;
+
+ return nq_work;
+}
+
+static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
+{
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
+ pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
+ return false;
+
+ return refcount_inc_not_zero(&pdsc->adminq_refcnt);
+}
+
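+/*
+ * Rough sketch of the ring bookkeeping used below: queue and
+ * completion ring sizes are powers of two, so indices wrap with a
+ * mask, e.g. with num_descs == 16:
+ *
+ *	idx = (idx + 1) & (num_descs - 1);	15 + 1 wraps to 0
+ *
+ * Completions are claimed by color: the device flips the color bit
+ * each time it laps the ring, and done_color is toggled when the
+ * tail wraps, so a descriptor counts as done only while its color
+ * matches done_color.
+ */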
+void pdsc_process_adminq(struct pdsc_qcq *qcq)
+{
+ union pds_core_adminq_comp *comp;
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ int nq_work = 0;
+ int aq_work = 0;
+
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
+ __func__);
+ return;
+ }
+
+ /* Check for NotifyQ event */
+ nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);
+
+ /* Check for empty queue, which can happen if the interrupt was
+ * for a NotifyQ event and there are no new AdminQ completions.
+ */
+ if (q->tail_idx == q->head_idx)
+ goto credits;
+
+ /* Find the first completion to clean,
+ * run the callback in the related q_info,
+ * and continue while we still match done color
+ */
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+ comp = cq->info[cq->tail_idx].comp;
+ while (pdsc_color_match(comp->color, cq->done_color)) {
+ q_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
+
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ comp = cq->info[cq->tail_idx].comp;
+
+ aq_work++;
+ }
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+
+ qcq->accum_work += aq_work;
+
+credits:
+ /* Return the interrupt credits, one for each completion */
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ nq_work + aq_work,
+ PDS_CORE_INTR_CRED_REARM);
+ refcount_dec(&pdsc->adminq_refcnt);
+}
+
+void pdsc_work_thread(struct work_struct *work)
+{
+ struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
+
+ pdsc_process_adminq(qcq);
+}
+
+irqreturn_t pdsc_adminq_isr(int irq, void *data)
+{
+ struct pdsc *pdsc = data;
+ struct pdsc_qcq *qcq;
+
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ qcq = &pdsc->adminqcq;
+ queue_work(pdsc->wq, &qcq->work);
+ refcount_dec(&pdsc->adminq_refcnt);
+
+ return IRQ_HANDLED;
+}
+
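+/*
+ * Free-space sketch for the avail check below: one slot is kept
+ * unused so that head == tail means empty. With num_descs == 16,
+ * head == 5, tail == 3: avail = 3 + 16 - 5 - 1 = 13 free slots;
+ * with head == 3, tail == 5: avail = 5 - 3 - 1 = 1.
+ */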
+static int __pdsc_adminq_post(struct pdsc *pdsc,
+ struct pdsc_qcq *qcq,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp)
+{
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ unsigned int avail;
+ int index;
+ int ret;
+
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+
+ /* Check for space in the queue */
+ avail = q->tail_idx;
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
+ else
+ avail -= q->head_idx + 1;
+ if (!avail) {
+ ret = -ENOSPC;
+ goto err_out_unlock;
+ }
+
+ /* Check that the FW is running */
+ if (!pdsc_is_fw_running(pdsc)) {
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
+ __func__);
+ }
+ ret = -ENXIO;
+
+ goto err_out_unlock;
+ }
+
+ /* Post the request */
+ index = q->head_idx;
+ q_info = &q->info[index];
+ q_info->dest = comp;
+ memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
+
+ dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
+ q->head_idx, q->tail_idx);
+ dev_dbg(pdsc->dev, "post admin queue command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ cmd, sizeof(*cmd), true);
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
+
+ pds_core_dbell_ring(pdsc->kern_dbpage,
+ q->hw_type, q->dbval | q->head_idx);
+ ret = index;
+
+err_out_unlock:
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+ return ret;
+}
+
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll)
+{
+ unsigned long poll_interval = 200;
+ unsigned long poll_jiffies;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
+ unsigned long remaining;
+ struct completion *wc;
+ int err = 0;
+ int index;
+
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
+ __func__, cmd->opcode);
+ return -ENXIO;
+ }
+
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
+ if (index < 0) {
+ err = index;
+ goto err_out;
+ }
+
+ wc = &pdsc->adminqcq.q.info[index].completion;
+ time_start = jiffies;
+ time_limit = time_start + HZ * pdsc->devcmd_timeout;
+ do {
+ /* Timeslice the actual wait to catch IO errors etc early */
+ poll_jiffies = usecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
+ if (remaining)
+ break;
+
+ if (!pdsc_is_fw_running(pdsc)) {
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
+ __func__);
+ }
+ err = -ENXIO;
+ break;
+ }
+
+ /* When fast_poll is not requested, prevent aggressive polling
+ * on failures due to timeouts by doing exponential back off.
+ */
+ if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
+ poll_interval <<= 1;
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+ dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
+ err = -ETIMEDOUT;
+ complete(wc);
+ }
+
+ dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ if (remaining && comp->status)
+ err = pdsc_err_to_errno(comp->status);
+
+err_out:
+ if (err) {
+ dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
+ __func__, cmd->opcode, comp->status, ERR_PTR(err));
+ if (err == -ENXIO || err == -ETIMEDOUT)
+ queue_work(pdsc->wq, &pdsc->health_work);
+ }
+
+ refcount_dec(&pdsc->adminq_refcnt);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pdsc_adminq_post);
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
new file mode 100644
index 000000000000..92f359f2b449
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+/**
+ * pds_client_register - Link the client to the firmware
+ * @pf: ptr to the PF driver's private data struct
+ * @devname: name that includes service info, e.g. pds_core.vDPA
+ *
+ * Return: positive client ID (ci) on success, or
+ * negative for error
+ */
+int pds_client_register(struct pdsc *pf, char *devname)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ int err;
+ u16 ci;
+
+ cmd.client_reg.opcode = PDS_AQ_CMD_CLIENT_REG;
+ strscpy(cmd.client_reg.devname, devname,
+ sizeof(cmd.client_reg.devname));
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err) {
+ dev_info(pf->dev, "register dev_name %s with DSC failed, status %d: %pe\n",
+ devname, comp.status, ERR_PTR(err));
+ return err;
+ }
+
+ ci = le16_to_cpu(comp.client_reg.client_id);
+ if (!ci) {
+ dev_err(pf->dev, "%s: device returned null client_id\n",
+ __func__);
+ return -EIO;
+ }
+
+ dev_dbg(pf->dev, "%s: device returned client_id %d for %s\n",
+ __func__, ci, devname);
+
+ return ci;
+}
+EXPORT_SYMBOL_GPL(pds_client_register);
+
+/**
+ * pds_client_unregister - Unlink the client from the firmware
+ * @pf: ptr to the PF driver's private data struct
+ * @client_id: id returned from pds_client_register()
+ *
+ * Return: 0 on success, or
+ * negative for error
+ */
+int pds_client_unregister(struct pdsc *pf, u16 client_id)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ int err;
+
+ cmd.client_unreg.opcode = PDS_AQ_CMD_CLIENT_UNREG;
+ cmd.client_unreg.client_id = cpu_to_le16(client_id);
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err)
+ dev_info(pf->dev, "unregister client_id %d failed, status %d: %pe\n",
+ client_id, comp.status, ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_unregister);
+
+/**
+ * pds_client_adminq_cmd - Process an adminq request for the client
+ * @padev: ptr to the client device
+ * @req: ptr to buffer with request
+ * @req_len: length of actual struct used for request
+ * @resp: ptr to buffer where answer is to be copied
+ * @flags: optional flags from pds_core_adminq_flags
+ *
+ * Return: 0 on success, or
+ * negative for error
+ *
+ * Client sends pointers to request and response buffers
+ * Core copies request data into pds_core_client_request_cmd
+ * Core sets other fields as needed
+ * Core posts to AdminQ
+ * Core copies completion data into response buffer
+ */
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags)
+{
+ union pds_core_adminq_cmd cmd = {};
+ struct pci_dev *pf_pdev;
+ struct pdsc *pf;
+ size_t cp_len;
+ int err;
+
+ pf_pdev = pci_physfn(padev->vf_pdev);
+ pf = pci_get_drvdata(pf_pdev);
+
+ dev_dbg(pf->dev, "%s: %s opcode %d\n",
+ __func__, dev_name(&padev->aux_dev.dev), req->opcode);
+
+ /* Wrap the client's request */
+ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ cmd.client_request.client_id = cpu_to_le16(padev->client_id);
+ cp_len = min_t(size_t, req_len, sizeof(cmd.client_request.client_cmd));
+ memcpy(cmd.client_request.client_cmd, req, cp_len);
+
+ err = pdsc_adminq_post(pf, &cmd, resp,
+ !!(flags & PDS_AQ_FLAG_FASTPOLL));
+ if (err && err != -EAGAIN)
+ dev_info(pf->dev, "client admin cmd failed: %pe\n",
+ ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_adminq_cmd);
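+
+/*
+ * Hypothetical usage sketch from a client driver; the opcode name is
+ * illustrative only, not defined here:
+ *
+ *	union pds_core_adminq_cmd cmd = {};
+ *	union pds_core_adminq_comp comp = {};
+ *
+ *	cmd.opcode = MY_CLIENT_OPCODE;
+ *	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd), &comp,
+ *				    PDS_AQ_FLAG_FASTPOLL);
+ */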
+
+static void pdsc_auxbus_dev_release(struct device *dev)
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(dev, struct pds_auxiliary_dev, aux_dev.dev);
+
+ kfree(padev);
+}
+
+static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
+ struct pdsc *pf,
+ u16 client_id,
+ char *name)
+{
+ struct auxiliary_device *aux_dev;
+ struct pds_auxiliary_dev *padev;
+ int err;
+
+ padev = kzalloc(sizeof(*padev), GFP_KERNEL);
+ if (!padev)
+ return ERR_PTR(-ENOMEM);
+
+ padev->vf_pdev = cf->pdev;
+ padev->client_id = client_id;
+
+ aux_dev = &padev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->id = cf->uid;
+ aux_dev->dev.parent = cf->dev;
+ aux_dev->dev.release = pdsc_auxbus_dev_release;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err < 0) {
+ dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ kfree(padev);
+ return ERR_PTR(err);
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
+ }
+
+ return padev;
+}
+
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
+{
+ struct pds_auxiliary_dev *padev;
+
+ if (!*pd_ptr)
+ return;
+
+ mutex_lock(&pf->config_lock);
+
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ *pd_ptr = NULL;
+
+ mutex_unlock(&pf->config_lock);
+}
+
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
+{
+ struct pds_auxiliary_dev *padev;
+ char devname[PDS_DEVNAME_LEN];
+ unsigned long mask;
+ u16 vt_support;
+ int client_id;
+ int err = 0;
+
+ if (!cf)
+ return -ENODEV;
+
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&pf->config_lock);
+
+ mask = BIT_ULL(PDSC_S_FW_DEAD) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (cf->state & mask) {
+ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+ __func__, cf->state);
+ err = -ENXIO;
+ goto out_unlock;
+ }
+
+ /* Verify that the type is supported and enabled. It is not
+	 * an error if the firmware doesn't support the feature; the
+ * driver just won't set up an auxiliary_device for it.
+ */
+ vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
+ if (!(vt_support &&
+ pf->viftype_status[vt].supported &&
+ pf->viftype_status[vt].enabled))
+ goto out_unlock;
+
+ /* Need to register with FW and get the client_id before
+ * creating the aux device so that the aux client can run
+	 * adminq commands as part of its probe
+ */
+ snprintf(devname, sizeof(devname), "%s.%s.%d",
+ PDS_CORE_DRV_NAME, pf->viftype_status[vt].name, cf->uid);
+ client_id = pds_client_register(pf, devname);
+ if (client_id < 0) {
+ err = client_id;
+ goto out_unlock;
+ }
+
+ padev = pdsc_auxbus_dev_register(cf, pf, client_id,
+ pf->viftype_status[vt].name);
+ if (IS_ERR(padev)) {
+ pds_client_unregister(pf, client_id);
+ err = PTR_ERR(padev);
+ goto out_unlock;
+ }
+ *pd_ptr = padev;
+
+out_unlock:
+ mutex_unlock(&pf->config_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
new file mode 100644
index 000000000000..076dfe2910c7
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
+static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);
+
+int pdsc_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_register_notify);
+
+void pdsc_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_unregister_notify);
+
+void pdsc_notify(unsigned long event, void *data)
+{
+ blocking_notifier_call_chain(&pds_notify_chain, event, data);
+}
+
+void pdsc_intr_free(struct pdsc *pdsc, int index)
+{
+ struct pdsc_intr_info *intr_info;
+
+ if (index >= pdsc->nintrs || index < 0) {
+ WARN(true, "bad intr index %d\n", index);
+ return;
+ }
+
+ intr_info = &pdsc->intr_info[index];
+ if (!intr_info->vector)
+ return;
+ dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
+ __func__, index, intr_info->vector, intr_info->name);
+
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+
+ free_irq(intr_info->vector, intr_info->data);
+
+ memset(intr_info, 0, sizeof(*intr_info));
+}
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data)
+{
+ struct pdsc_intr_info *intr_info;
+ unsigned int index;
+ int err;
+
+ /* Find the first available interrupt */
+ for (index = 0; index < pdsc->nintrs; index++)
+ if (!pdsc->intr_info[index].vector)
+ break;
+ if (index >= pdsc->nintrs) {
+ dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
+ __func__, index, pdsc->nintrs);
+ return -ENOSPC;
+ }
+
+ pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
+ PDS_CORE_INTR_CRED_RESET_COALESCE);
+
+ intr_info = &pdsc->intr_info[index];
+
+ intr_info->index = index;
+ intr_info->data = data;
+ strscpy(intr_info->name, name, sizeof(intr_info->name));
+
+ /* Get the OS vector number for the interrupt */
+ err = pci_irq_vector(pdsc->pdev, index);
+ if (err < 0) {
+ dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
+ index, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+ intr_info->vector = err;
+
+ /* Init the device's intr mask */
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+ pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+
+ /* Register the isr with a name */
+ err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
+ if (err) {
+ dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
+ intr_info->vector, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+
+ return index;
+
+err_out_free_intr:
+ pdsc_intr_free(pdsc, index);
+ return err;
+}
+
+static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
+ qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
+ return;
+
+ pdsc_intr_free(pdsc, qcq->intx);
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+}
+
+static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ int index;
+
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "%s-%d-%s",
+ PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
+ if (index < 0)
+ return index;
+ qcq->intx = index;
+ qcq->cq.bound_intr = &pdsc->intr_info[index];
+
+ return 0;
+}
+
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+
+ if (!(qcq && qcq->pdsc))
+ return;
+
+ pdsc_debugfs_del_qcq(qcq);
+
+ pdsc_qcq_intr_free(pdsc, qcq);
+
+ if (qcq->q_base)
+ dma_free_coherent(dev, qcq->q_size,
+ qcq->q_base, qcq->q_base_pa);
+
+ if (qcq->cq_base)
+ dma_free_coherent(dev, qcq->cq_size,
+ qcq->cq_base, qcq->cq_base_pa);
+
+ vfree(qcq->cq.info);
+ vfree(qcq->q.info);
+
+ memset(qcq, 0, sizeof(*qcq));
+}
+
+static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_q_info *cur;
+ unsigned int i;
+
+ q->base = base;
+ q->base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
+ cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
+}
+
+static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_cq_info *cur;
+ unsigned int i;
+
+ cq->base = base;
+ cq->base_pa = base_pa;
+
+ for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
+ cur->comp = base + (i * cq->desc_size);
+}
+
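+/*
+ * In the PDS_CORE_QCQ_F_NOTIFYQ case below, the queue and completion
+ * queue share a single DMA allocation, roughly:
+ *
+ *	[pad to PDS_PAGE_SIZE][q descriptors][pad][cq descriptors]
+ *
+ * which is why only q_base/q_size are set (and later freed) while the
+ * cq base/size fields stay zero in that mode.
+ */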
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+ void *q_base, *cq_base;
+ dma_addr_t cq_base_pa;
+ dma_addr_t q_base_pa;
+ int err;
+
+ qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
+ if (!qcq->q.info) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ qcq->pdsc = pdsc;
+ qcq->flags = flags;
+ INIT_WORK(&qcq->work, pdsc_work_thread);
+
+ qcq->q.type = type;
+ qcq->q.index = index;
+ qcq->q.num_descs = num_descs;
+ qcq->q.desc_size = desc_size;
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->q.pid = pid;
+ snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);
+
+ err = pdsc_qcq_intr_alloc(pdsc, qcq);
+ if (err)
+ goto err_out_free_q_info;
+
+ qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
+ if (!qcq->cq.info) {
+ err = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ qcq->cq.num_descs = num_descs;
+ qcq->cq.desc_size = cq_desc_size;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+
+ if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
+ /* q & cq need to be contiguous in case of notifyq */
+ qcq->q_size = PDS_PAGE_SIZE +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
+ ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
+ qcq->q_base = dma_alloc_coherent(dev,
+ qcq->q_size + qcq->cq_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ cq_base = PTR_ALIGN(q_base +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->q_base_pa +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+
+ } else {
+ /* q DMA descriptors */
+ qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
+ qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ /* cq DMA descriptors */
+ qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
+ qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
+ &qcq->cq_base_pa,
+ GFP_KERNEL);
+ if (!qcq->cq_base) {
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
+ }
+
+ pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
+ qcq->cq.bound_q = &qcq->q;
+
+ pdsc_debugfs_add_qcq(pdsc, qcq);
+
+ return 0;
+
+err_out_free_q:
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+err_out_free_cq_info:
+ vfree(qcq->cq.info);
+err_out_free_irq:
+ pdsc_qcq_intr_free(pdsc, qcq);
+err_out_free_q_info:
+ vfree(qcq->q.info);
+ memset(qcq, 0, sizeof(*qcq));
+err_out:
+ dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
+ return err;
+}
+
+static void pdsc_core_uninit(struct pdsc *pdsc)
+{
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+}
+
+static int pdsc_core_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .init.opcode = PDS_CORE_CMD_INIT,
+ };
+ struct pds_core_dev_init_data_out cido;
+ struct pds_core_dev_init_data_in cidi;
+ u32 dbid_count;
+ u32 dbpage_num;
+ int numdescs;
+ size_t sz;
+ int err;
+
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ return err;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_uninit;
+
+ cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
+ cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
+ cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
+ cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
+ cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
+ cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
+ cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);
+
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);
+
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (!err) {
+ sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
+ }
+
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev, "Device init command failed: %pe\n",
+ ERR_PTR(err));
+ goto err_out_uninit;
+ }
+
+ pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
+
+ dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
+ dbpage_num = pdsc->hw_index * dbid_count;
+ pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
+ if (!pdsc->kern_dbpage) {
+ dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
+ err = -ENOMEM;
+ goto err_out_uninit;
+ }
+
+ pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
+ pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
+ pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);
+
+ pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
+ pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
+ pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);
+
+ pdsc->last_eid = 0;
+
+ return 0;
+
+err_out_uninit:
+ pdsc_core_uninit(pdsc);
+ return err;
+}
+
+static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
+ .vif_id = PDS_DEV_TYPE_FWCTL,
+ .dl_id = -1 },
+ [PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
+ .vif_id = PDS_DEV_TYPE_VDPA,
+ .dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
+ [PDS_DEV_TYPE_MAX] = {}
+};
+
+static int pdsc_viftypes_init(struct pdsc *pdsc)
+{
+ enum pds_core_vif_types vt;
+
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
+ GFP_KERNEL);
+ if (!pdsc->viftype_status)
+ return -ENOMEM;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ bool vt_support;
+
+ if (!pdsc_viftype_defaults[vt].name)
+ continue;
+
+ /* Grab the defaults */
+ pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];
+
+ /* See what the Core device has for support */
+ vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+
+ dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
+ pdsc->viftype_status[vt].name,
+ vt_support ? "" : "not ");
+
+ pdsc->viftype_status[vt].supported = vt_support;
+ }
+
+ return 0;
+}
+
+int pdsc_setup(struct pdsc *pdsc, bool init)
+{
+ int err;
+
+ err = pdsc_dev_init(pdsc);
+ if (err)
+ return err;
+
+ /* Set up the Core with the AdminQ and NotifyQ info */
+ err = pdsc_core_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ /* Set up the VIFs */
+ if (init) {
+ err = pdsc_viftypes_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ pdsc_debugfs_add_viftype(pdsc);
+ }
+
+ refcount_set(&pdsc->adminq_refcnt, 1);
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return 0;
+
+err_out_teardown:
+ pdsc_teardown(pdsc, init);
+ return err;
+}
+
+void pdsc_teardown(struct pdsc *pdsc, bool removing)
+{
+ if (!pdsc->pdev->is_virtfn)
+ pdsc_devcmd_reset(pdsc);
+ if (pdsc->adminqcq.work.func)
+ cancel_work_sync(&pdsc->adminqcq.work);
+
+ pdsc_core_uninit(pdsc);
+
+ if (removing) {
+ kfree(pdsc->viftype_status);
+ pdsc->viftype_status = NULL;
+ }
+
+ pdsc_dev_uninit(pdsc);
+
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+}
+
+int pdsc_start(struct pdsc *pdsc)
+{
+ pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
+ PDS_CORE_INTR_MASK_CLEAR);
+
+ return 0;
+}
+
+void pdsc_stop(struct pdsc *pdsc)
+{
+ int i;
+
+ if (!pdsc->intr_info)
+ return;
+
+ /* Mask interrupts that are in use */
+ for (i = 0; i < pdsc->nintrs; i++)
+ if (pdsc->intr_info[i].vector)
+ pds_core_intr_mask(&pdsc->intr_ctrl[i],
+ PDS_CORE_INTR_MASK_SET);
+}
+
+static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
+{
+ /* The driver initializes the adminq_refcnt to 1 when the adminq is
+ * allocated and ready for use. Other users/requesters will increment
+ * the refcnt while in use. If the refcnt is down to 1 then the adminq
+ * is not in use and the refcnt can be cleared and adminq freed. Before
+ * calling this function the driver will set PDSC_S_FW_DEAD, which
+ * causes subsequent attempts to use the adminq and increment the
 * causes subsequent attempts to use the adminq and increment the
+ * refcnt to fail. This guarantees that this function will eventually
+ * exit.
+ */
+ while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
+ dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
+ __func__);
+ cpu_relax();
+ }
+}
+
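+/* Example usage pattern (illustrative only, not a driver API): a
+ * requester would bracket its adminq access roughly as
+ *
+ *	if (!refcount_inc_not_zero(&pdsc->adminq_refcnt))
+ *		return -ENXIO;	(adminq already torn down)
+ *	... post command, wait for completion ...
+ *	refcount_dec(&pdsc->adminq_refcnt);
+ */
+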
+void pdsc_fw_down(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 0,
+ };
+
+ if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_warn(pdsc->dev, "%s: already happening\n", __func__);
+ return;
+ }
+
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ pdsc_adminq_wait_and_dec_once_unused(pdsc);
+
+ /* Notify clients of fw_down */
+ if (pdsc->fw_reporter)
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+void pdsc_fw_up(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 1,
+ };
+ int err;
+
+ if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
+ return;
+ }
+
+ if (pdsc->pdev->is_virtfn) {
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return;
+ }
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
+ if (err)
+ goto err_out;
+
+ err = pdsc_start(pdsc);
+ if (err)
+ goto err_out;
+
+ /* Notify clients of fw_up */
+ pdsc->fw_recoveries++;
+ if (pdsc->fw_reporter)
+ devlink_health_reporter_state_update(pdsc->fw_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ return;
+
+err_out:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
+static void pdsc_check_pci_health(struct pdsc *pdsc)
+{
+ u8 fw_status;
+
+ /* some sort of teardown already in progress */
+ if (!pdsc->info_regs)
+ return;
+
+ fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ /* is PCI broken? */
+ if (fw_status != PDS_RC_BAD_PCI)
+ return;
+
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
+}
+
+void pdsc_health_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
+ unsigned long mask;
+ bool healthy;
+
+ mutex_lock(&pdsc->config_lock);
+
+ /* Don't do a check when in a transition state */
+ mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (pdsc->state & mask)
+ goto out_unlock;
+
+ healthy = pdsc_is_fw_good(pdsc);
+ dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
+ __func__, healthy, pdsc->fw_status, pdsc->last_hb);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ if (healthy)
+ pdsc_fw_up(pdsc);
+ } else {
+ if (!healthy)
+ pdsc_fw_down(pdsc);
+ }
+
+ pdsc_check_pci_health(pdsc);
+
+ pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+out_unlock:
+ mutex_unlock(&pdsc->config_lock);
+}
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
new file mode 100644
index 000000000000..4a6b35c84dab
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_H_
+#define _PDSC_H_
+
+#include <linux/debugfs.h>
+#include <net/devlink.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_intr.h>
+
+#define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+#define PDSC_WATCHDOG_SECS 5
+#define PDSC_QUEUE_NAME_MAX_SZ 16
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
+#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+#define PDSC_TEARDOWN_RECOVERY false
+#define PDSC_TEARDOWN_REMOVING true
+#define PDSC_SETUP_RECOVERY false
+#define PDSC_SETUP_INIT true
+
+struct pdsc_dev_bar {
+ void __iomem *vaddr;
+ phys_addr_t bus_addr;
+ unsigned long len;
+ int res_index;
+};
+
+struct pdsc;
+
+struct pdsc_vf {
+ struct pds_auxiliary_dev *padev;
+ struct pdsc *vf;
+ u16 index;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
+
+struct pdsc_devinfo {
+ u8 asic_type;
+ u8 asic_rev;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN + 1];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN + 1];
+};
+
+struct pdsc_queue {
+ struct pdsc_q_info *info;
+ u64 dbval;
+ u16 head_idx;
+ u16 tail_idx;
+ u8 hw_type;
+ unsigned int index;
+ unsigned int num_descs;
+ u64 dbell_count;
+ u64 features;
+ unsigned int type;
+ unsigned int hw_index;
+ union {
+ void *base;
+ struct pds_core_admin_cmd *adminq;
+ };
+ dma_addr_t base_pa; /* must be page aligned */
+ unsigned int desc_size;
+ unsigned int pid;
+ char name[PDSC_QUEUE_NAME_MAX_SZ];
+};
+
+#define PDSC_INTR_NAME_MAX_SZ 32
+
+struct pdsc_intr_info {
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ unsigned int index;
+ unsigned int vector;
+ void *data;
+};
+
+struct pdsc_cq_info {
+ void *comp;
+};
+
+struct pdsc_buf_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+ u32 page_offset;
+ u32 len;
+};
+
+struct pdsc_q_info {
+ union {
+ void *desc;
+ struct pdsc_admin_cmd *adminq_desc;
+ };
+ unsigned int bytes;
+ unsigned int nbufs;
+ struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
+ struct completion completion;
+ void *dest;
+};
+
+struct pdsc_cq {
+ struct pdsc_cq_info *info;
+ struct pdsc_queue *bound_q;
+ struct pdsc_intr_info *bound_intr;
+ u16 tail_idx;
+ bool done_color;
+ unsigned int num_descs;
+ unsigned int desc_size;
+ void *base;
+ dma_addr_t base_pa; /* must be page aligned */
+} ____cacheline_aligned_in_smp;
+
+struct pdsc_qcq {
+ struct pdsc *pdsc;
+ void *q_base;
+ dma_addr_t q_base_pa; /* might not be page aligned */
+ void *cq_base;
+ dma_addr_t cq_base_pa; /* might not be page aligned */
+ u32 q_size;
+ u32 cq_size;
+ bool armed;
+ unsigned int flags;
+
+ struct work_struct work;
+ struct pdsc_queue q;
+ struct pdsc_cq cq;
+ int intx;
+
+ u32 accum_work;
+ struct dentry *dentry;
+};
+
+struct pdsc_viftype {
+ char *name;
+ bool supported;
+ bool enabled;
+ int dl_id;
+ int vif_id;
+ struct pds_auxiliary_dev *padev;
+};
+
+/* No state flags set means we are in a steady running state */
+enum pdsc_state_flags {
+ PDSC_S_FW_DEAD, /* stopped, wait on startup or recovery */
+ PDSC_S_INITING_DRIVER, /* initial startup from probe */
+ PDSC_S_STOPPING_DRIVER, /* driver remove */
+
+ /* leave this as last */
+ PDSC_S_STATE_SIZE
+};
+
+struct pdsc {
+ struct pci_dev *pdev;
+ struct dentry *dentry;
+ struct device *dev;
+ struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pds_auxiliary_dev *padev;
+ struct pdsc_vf *vfs;
+ int num_vfs;
+ int vf_id;
+ int hw_index;
+ int uid;
+
+ unsigned long state;
+ u8 fw_status;
+ u8 fw_generation;
+ unsigned long last_fw_time;
+ u32 last_hb;
+ struct timer_list wdtimer;
+ unsigned int wdtimer_period;
+ struct work_struct health_work;
+ struct devlink_health_reporter *fw_reporter;
+ u32 fw_recoveries;
+
+ struct pdsc_devinfo dev_info;
+ struct pds_core_dev_identity dev_ident;
+ unsigned int nintrs;
+ struct pdsc_intr_info *intr_info; /* array of nintrs elements */
+
+ struct workqueue_struct *wq;
+
+ unsigned int devcmd_timeout;
+ struct mutex devcmd_lock; /* lock for dev_cmd operations */
+ struct mutex config_lock; /* lock for configuration operations */
+ spinlock_t adminq_lock; /* lock for adminq operations */
+ refcount_t adminq_refcnt;
+ struct pds_core_dev_info_regs __iomem *info_regs;
+ struct pds_core_dev_cmd_regs __iomem *cmd_regs;
+ struct pds_core_intr __iomem *intr_ctrl;
+ u64 __iomem *intr_status;
+ u64 __iomem *db_pages;
+ dma_addr_t phy_db_pages;
+ u64 __iomem *kern_dbpage;
+
+ struct pdsc_qcq adminqcq;
+ struct pdsc_qcq notifyqcq;
+ u64 last_eid;
+ struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
+};
+
+/**
+ * enum pds_core_dbell_bits - bitwise composition of dbell values.
+ *
+ * @PDS_CORE_DBELL_QID_MASK: unshifted mask of valid queue id bits.
+ * @PDS_CORE_DBELL_QID_SHIFT: queue id shift amount in dbell value.
+ * @PDS_CORE_DBELL_QID: macro to build QID component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_MASK: unshifted mask of valid ring bits.
+ * @PDS_CORE_DBELL_RING_SHIFT: ring shift amount in dbell value.
+ * @PDS_CORE_DBELL_RING: macro to build ring component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_0: ring zero dbell component value.
+ * @PDS_CORE_DBELL_RING_1: ring one dbell component value.
+ * @PDS_CORE_DBELL_RING_2: ring two dbell component value.
+ * @PDS_CORE_DBELL_RING_3: ring three dbell component value.
+ *
+ * @PDS_CORE_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed.
+ */
+enum pds_core_dbell_bits {
+ PDS_CORE_DBELL_QID_MASK = 0xffffff,
+ PDS_CORE_DBELL_QID_SHIFT = 24,
+
+#define PDS_CORE_DBELL_QID(n) \
+ (((u64)(n) & PDS_CORE_DBELL_QID_MASK) << PDS_CORE_DBELL_QID_SHIFT)
+
+ PDS_CORE_DBELL_RING_MASK = 0x7,
+ PDS_CORE_DBELL_RING_SHIFT = 16,
+
+#define PDS_CORE_DBELL_RING(n) \
+ (((u64)(n) & PDS_CORE_DBELL_RING_MASK) << PDS_CORE_DBELL_RING_SHIFT)
+
+ PDS_CORE_DBELL_RING_0 = 0,
+ PDS_CORE_DBELL_RING_1 = PDS_CORE_DBELL_RING(1),
+ PDS_CORE_DBELL_RING_2 = PDS_CORE_DBELL_RING(2),
+ PDS_CORE_DBELL_RING_3 = PDS_CORE_DBELL_RING(3),
+
+ PDS_CORE_DBELL_INDEX_MASK = 0xffff,
+};
+
+static inline void pds_core_dbell_ring(u64 __iomem *db_page,
+ enum pds_core_logical_qtype qtype,
+ u64 val)
+{
+ writeq(val, &db_page[qtype]);
+}
+
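+/* A minimal sketch of composing and ringing a doorbell value, assuming a
+ * queue id of 3 and a producer index of 0x10 (values hypothetical; qtype
+ * selects the slot within the doorbell page):
+ *
+ *	u64 val = PDS_CORE_DBELL_QID(3) | PDS_CORE_DBELL_RING_1 |
+ *		  (0x10 & PDS_CORE_DBELL_INDEX_MASK);
+ *	pds_core_dbell_ring(pdsc->kern_dbpage, qtype, val);
+ */
+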
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num);
+
+void pdsc_debugfs_create(void);
+void pdsc_debugfs_destroy(void);
+void pdsc_debugfs_add_dev(struct pdsc *pdsc);
+void pdsc_debugfs_del_dev(struct pdsc *pdsc);
+void pdsc_debugfs_add_ident(struct pdsc *pdsc);
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc);
+void pdsc_debugfs_add_irqs(struct pdsc *pdsc);
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq);
+
+int pdsc_err_to_errno(enum pds_core_status_code code);
+bool pdsc_is_fw_running(struct pdsc *pdsc);
+bool pdsc_is_fw_good(struct pdsc *pdsc);
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_init(struct pdsc *pdsc);
+int pdsc_devcmd_reset(struct pdsc *pdsc);
+int pdsc_dev_init(struct pdsc *pdsc);
+void pdsc_dev_uninit(struct pdsc *pdsc);
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data);
+void pdsc_intr_free(struct pdsc *pdsc, int index);
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq);
+int pdsc_setup(struct pdsc *pdsc, bool init);
+void pdsc_teardown(struct pdsc *pdsc, bool removing);
+int pdsc_start(struct pdsc *pdsc);
+void pdsc_stop(struct pdsc *pdsc);
+void pdsc_health_thread(struct work_struct *work);
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void pdsc_notify(unsigned long event, void *data);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
+
+void pdsc_process_adminq(struct pdsc_qcq *qcq);
+void pdsc_work_thread(struct work_struct *work);
+irqreturn_t pdsc_adminq_isr(int irq, void *data);
+
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+void pdsc_fw_down(struct pdsc *pdsc);
+void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
+
+#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
new file mode 100644
index 000000000000..04c5e3abd8d7
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+
+static struct dentry *pdsc_dir;
+
+void pdsc_debugfs_create(void)
+{
+ pdsc_dir = debugfs_create_dir(PDS_CORE_DRV_NAME, NULL);
+}
+
+void pdsc_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(pdsc_dir);
+}
+
+void pdsc_debugfs_add_dev(struct pdsc *pdsc)
+{
+ pdsc->dentry = debugfs_create_dir(pci_name(pdsc->pdev), pdsc_dir);
+
+ debugfs_create_ulong("state", 0400, pdsc->dentry, &pdsc->state);
+}
+
+void pdsc_debugfs_del_dev(struct pdsc *pdsc)
+{
+ debugfs_remove_recursive(pdsc->dentry);
+ pdsc->dentry = NULL;
+}
+
+static int identity_show(struct seq_file *seq, void *v)
+{
+ struct pds_core_dev_identity *ident;
+ struct pdsc *pdsc = seq->private;
+ int vt;
+
+ ident = &pdsc->dev_ident;
+
+ seq_printf(seq, "fw_heartbeat: 0x%x\n",
+ ioread32(&pdsc->info_regs->fw_heartbeat));
+
+ seq_printf(seq, "nlifs: %d\n",
+ le32_to_cpu(ident->nlifs));
+ seq_printf(seq, "nintrs: %d\n",
+ le32_to_cpu(ident->nintrs));
+ seq_printf(seq, "ndbpgs_per_lif: %d\n",
+ le32_to_cpu(ident->ndbpgs_per_lif));
+ seq_printf(seq, "intr_coal_mult: %d\n",
+ le32_to_cpu(ident->intr_coal_mult));
+ seq_printf(seq, "intr_coal_div: %d\n",
+ le32_to_cpu(ident->intr_coal_div));
+
+ seq_puts(seq, "vif_types: ");
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++)
+ seq_printf(seq, "%d ",
+ le16_to_cpu(pdsc->dev_ident.vif_types[vt]));
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(identity);
+
+void pdsc_debugfs_add_ident(struct pdsc *pdsc)
+{
+ /* This file will already exist in the reset flow */
+ if (debugfs_lookup("identity", pdsc->dentry))
+ return;
+
+ debugfs_create_file("identity", 0400, pdsc->dentry,
+ pdsc, &identity_fops);
+}
+
+static int viftype_show(struct seq_file *seq, void *v)
+{
+ struct pdsc *pdsc = seq->private;
+ int vt;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (!pdsc->viftype_status[vt].name)
+ continue;
+
+ seq_printf(seq, "%s\t%d supported %d enabled\n",
+ pdsc->viftype_status[vt].name,
+ pdsc->viftype_status[vt].supported,
+ pdsc->viftype_status[vt].enabled);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(viftype);
+
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc)
+{
+ debugfs_create_file("viftypes", 0400, pdsc->dentry,
+ pdsc, &viftype_fops);
+}
+
+static const struct debugfs_reg32 intr_ctrl_regs[] = {
+ { .name = "coal_init", .offset = 0, },
+ { .name = "mask", .offset = 4, },
+ { .name = "credits", .offset = 8, },
+ { .name = "mask_on_assert", .offset = 12, },
+ { .name = "coal_timer", .offset = 16, },
+};
+
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry;
+ struct debugfs_regset32 *intr_ctrl_regset;
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_cq *cq = &qcq->cq;
+
+ qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
+ if (IS_ERR(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
+
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
+
+ q_dentry = debugfs_create_dir("q", qcq->dentry);
+ if (IS_ERR(q_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, q_dentry, &q->index);
+ debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
+ debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
+ debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
+
+ debugfs_create_u16("tail", 0400, q_dentry, &q->tail_idx);
+ debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
+
+ cq_dentry = debugfs_create_dir("cq", qcq->dentry);
+ if (IS_ERR(cq_dentry))
+ return;
+
+ debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
+ debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
+ debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
+ debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
+ debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
+
+ if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+
+ intr_dentry = debugfs_create_dir("intr", qcq->dentry);
+ if (IS_ERR(intr_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
+ debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
+
+ intr_ctrl_regset = devm_kzalloc(pdsc->dev,
+ sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
+ if (!intr_ctrl_regset)
+ return;
+ intr_ctrl_regset->regs = intr_ctrl_regs;
+ intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
+ intr_ctrl_regset->base = &pdsc->intr_ctrl[intr->index];
+
+ debugfs_create_regset32("intr_ctrl", 0400, intr_dentry,
+ intr_ctrl_regset);
+ }
+}
+
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq)
+{
+ debugfs_remove_recursive(qcq->dentry);
+ qcq->dentry = NULL;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
new file mode 100644
index 000000000000..495ef4ef8c10
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+
+#include "core.h"
+
+int pdsc_err_to_errno(enum pds_core_status_code code)
+{
+ switch (code) {
+ case PDS_RC_SUCCESS:
+ return 0;
+ case PDS_RC_EVERSION:
+ case PDS_RC_EQTYPE:
+ case PDS_RC_EQID:
+ case PDS_RC_EINVAL:
+ case PDS_RC_ENOSUPP:
+ return -EINVAL;
+ case PDS_RC_EPERM:
+ return -EPERM;
+ case PDS_RC_ENOENT:
+ return -ENOENT;
+ case PDS_RC_EAGAIN:
+ return -EAGAIN;
+ case PDS_RC_ENOMEM:
+ return -ENOMEM;
+ case PDS_RC_EFAULT:
+ return -EFAULT;
+ case PDS_RC_EBUSY:
+ return -EBUSY;
+ case PDS_RC_EEXIST:
+ return -EEXIST;
+ case PDS_RC_EVFID:
+ return -ENODEV;
+ case PDS_RC_ECLIENT:
+ return -ECHILD;
+ case PDS_RC_ENOSPC:
+ return -ENOSPC;
+ case PDS_RC_ERANGE:
+ return -ERANGE;
+ case PDS_RC_BAD_ADDR:
+ return -EFAULT;
+ case PDS_RC_BAD_PCI:
+ return -ENXIO;
+ case PDS_RC_EOPCODE:
+ case PDS_RC_EINTR:
+ case PDS_RC_DEV_CMD:
+ case PDS_RC_ERROR:
+ case PDS_RC_ERDMA:
+ case PDS_RC_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+bool pdsc_is_fw_running(struct pdsc *pdsc)
+{
+ if (!pdsc->info_regs)
+ return false;
+
+ pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
+ pdsc->last_fw_time = jiffies;
+ pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
+
+ /* Firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (pdsc->fw_status != PDS_RC_BAD_PCI) &&
+ (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
+}
+
+bool pdsc_is_fw_good(struct pdsc *pdsc)
+{
+ bool fw_running = pdsc_is_fw_running(pdsc);
+ u8 gen;
+
+ /* Make sure to update the cached fw_status by calling
+ * pdsc_is_fw_running() before getting the generation
+ */
+ gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+ return fw_running && gen == pdsc->fw_generation;
+}
+
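+/* Illustrative example (bit values hypothetical): if fw_status reads 0x31,
+ * the RUNNING bit is set and the generation bits are 0x30; after a firmware
+ * restart the register might read 0x41, so the generation no longer matches
+ * the cached fw_generation and pdsc_is_fw_good() returns false.
+ */
+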
+static u8 pdsc_devcmd_status(struct pdsc *pdsc)
+{
+ return ioread8(&pdsc->cmd_regs->comp.status);
+}
+
+static bool pdsc_devcmd_done(struct pdsc *pdsc)
+{
+ return ioread32(&pdsc->cmd_regs->done) & PDS_CORE_DEV_CMD_DONE;
+}
+
+static void pdsc_devcmd_dbell(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->done);
+ iowrite32(1, &pdsc->cmd_regs->doorbell);
+}
+
+static void pdsc_devcmd_clean(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->doorbell);
+ memset_io(&pdsc->cmd_regs->cmd, 0, sizeof(pdsc->cmd_regs->cmd));
+}
+
+static const char *pdsc_devcmd_str(int opcode)
+{
+ switch (opcode) {
+ case PDS_CORE_CMD_NOP:
+ return "PDS_CORE_CMD_NOP";
+ case PDS_CORE_CMD_IDENTIFY:
+ return "PDS_CORE_CMD_IDENTIFY";
+ case PDS_CORE_CMD_RESET:
+ return "PDS_CORE_CMD_RESET";
+ case PDS_CORE_CMD_INIT:
+ return "PDS_CORE_CMD_INIT";
+ case PDS_CORE_CMD_FW_DOWNLOAD:
+ return "PDS_CORE_CMD_FW_DOWNLOAD";
+ case PDS_CORE_CMD_FW_CONTROL:
+ return "PDS_CORE_CMD_FW_CONTROL";
+ default:
+ return "PDS_CORE_CMD_UNKNOWN";
+ }
+}
+
+static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
+{
+ struct device *dev = pdsc->dev;
+ unsigned long start_time;
+ unsigned long max_wait;
+ unsigned long duration;
+ int timeout = 0;
+ bool running;
+ int done = 0;
+ int err = 0;
+ int status;
+
+ start_time = jiffies;
+ max_wait = start_time + (max_seconds * HZ);
+
+ while (!done && !timeout) {
+ running = pdsc_is_fw_running(pdsc);
+ if (!running)
+ break;
+
+ done = pdsc_devcmd_done(pdsc);
+ if (done)
+ break;
+
+ timeout = time_after(jiffies, max_wait);
+ if (timeout)
+ break;
+
+ usleep_range(100, 200);
+ }
+ duration = jiffies - start_time;
+
+ if (done && duration > HZ)
+ dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
+ opcode, pdsc_devcmd_str(opcode), duration / HZ);
+
+ if ((!done || timeout) && running) {
+ dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
+ opcode, pdsc_devcmd_str(opcode), done, timeout,
+ max_seconds);
+ err = -ETIMEDOUT;
+ pdsc_devcmd_clean(pdsc);
+ }
+
+ status = pdsc_devcmd_status(pdsc);
+ err = pdsc_err_to_errno(status);
+ if (err && err != -EAGAIN)
+ dev_err(dev, "DEVCMD %d %s failed, status=%d err %d %pe\n",
+ opcode, pdsc_devcmd_str(opcode), status, err,
+ ERR_PTR(err));
+
+ return err;
+}
+
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
+ memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
+ pdsc_devcmd_dbell(pdsc);
+ err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
+
+ if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
+ queue_work(pdsc->wq, &pdsc->health_work);
+ else
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+
+ return err;
+}
+
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, cmd, comp, max_seconds);
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ return err;
+}
+
+int pdsc_devcmd_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .opcode = PDS_CORE_CMD_INIT,
+ };
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+int pdsc_devcmd_reset(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .reset.opcode = PDS_CORE_CMD_RESET,
+ };
+
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_identify_locked(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .identify.opcode = PDS_CORE_CMD_IDENTIFY,
+ .identify.ver = PDS_CORE_IDENTITY_VERSION_1,
+ };
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static void pdsc_init_devinfo(struct pdsc *pdsc)
+{
+ pdsc->dev_info.asic_type = ioread8(&pdsc->info_regs->asic_type);
+ pdsc->dev_info.asic_rev = ioread8(&pdsc->info_regs->asic_rev);
+ pdsc->fw_generation = PDS_CORE_FW_STS_F_GENERATION &
+ ioread8(&pdsc->info_regs->fw_status);
+
+ memcpy_fromio(pdsc->dev_info.fw_version,
+ pdsc->info_regs->fw_version,
+ PDS_CORE_DEVINFO_FWVERS_BUFLEN);
+ pdsc->dev_info.fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN] = 0;
+
+ memcpy_fromio(pdsc->dev_info.serial_num,
+ pdsc->info_regs->serial_num,
+ PDS_CORE_DEVINFO_SERIAL_BUFLEN);
+ pdsc->dev_info.serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN] = 0;
+
+ dev_dbg(pdsc->dev, "fw_version %s\n", pdsc->dev_info.fw_version);
+}
+
+static int pdsc_identify(struct pdsc *pdsc)
+{
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
+ int n;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+ /* Catching the return quiets a Wformat-truncation complaint */
+ n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+ if (n >= sizeof(drv.driver_ver_str))
+ dev_dbg(pdsc->dev, "release name truncated, don't care\n");
+
+ /* Next let's get some info about the device
+ * We use the devcmd_lock at this level in order to
+ * get safe access to the cmd_regs->data before anyone
+ * else can mess it up
+ */
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(drv), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &drv, sz);
+
+ err = pdsc_devcmd_identify_locked(pdsc);
+ if (!err) {
+ sz = min_t(size_t, sizeof(pdsc->dev_ident),
+ sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&pdsc->dev_ident, &pdsc->cmd_regs->data, sz);
+ }
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ if (err) {
+ dev_err(pdsc->dev, "Cannot identify device: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ if (isprint(pdsc->dev_info.fw_version[0]) &&
+ isascii(pdsc->dev_info.fw_version[0]))
+ dev_info(pdsc->dev, "FW: %.*s\n",
+ (int)(sizeof(pdsc->dev_info.fw_version) - 1),
+ pdsc->dev_info.fw_version);
+ else
+ dev_info(pdsc->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)pdsc->dev_info.fw_version[0],
+ (u8)pdsc->dev_info.fw_version[1],
+ (u8)pdsc->dev_info.fw_version[2],
+ (u8)pdsc->dev_info.fw_version[3]);
+
+ return 0;
+}
+
+void pdsc_dev_uninit(struct pdsc *pdsc)
+{
+ if (pdsc->intr_info) {
+ int i;
+
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
+ }
+
+ pci_free_irq_vectors(pdsc->pdev);
+}
+
+int pdsc_dev_init(struct pdsc *pdsc)
+{
+ unsigned int nintrs;
+ int err;
+
+ /* Initial init and reset of device */
+ pdsc_init_devinfo(pdsc);
+ pdsc->devcmd_timeout = PDS_CORE_DEVCMD_TIMEOUT;
+
+ err = pdsc_devcmd_reset(pdsc);
+ if (err)
+ return err;
+
+ err = pdsc_identify(pdsc);
+ if (err)
+ return err;
+
+ pdsc_debugfs_add_ident(pdsc);
+
+ /* Now we can reserve interrupts */
+ nintrs = le32_to_cpu(pdsc->dev_ident.nintrs);
+ nintrs = min_t(unsigned int, num_online_cpus(), nintrs);
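+ /* e.g. (hypothetical) a device advertising 64 interrupts on a
+ * 16-CPU host gets 16 MSI-X vectors reserved here
+ */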
+
+ /* Get intr_info struct array for tracking */
+ pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
+ if (!pdsc->intr_info)
+ return -ENOMEM;
+
+ err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
+ if (err != nintrs) {
+ dev_err(pdsc->dev, "Can't get %d intrs from OS: %pe\n",
+ nintrs, ERR_PTR(err));
+ err = -ENOSPC;
+ goto err_out;
+ }
+ pdsc->nintrs = nintrs;
+
+ return 0;
+
+err_out:
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+
+ return err;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
new file mode 100644
index 000000000000..b576be626a29
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+static struct
+pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
+ enum devlink_param_type dl_id)
+{
+ int vt;
+
+ if (!pdsc->viftype_status)
+ return NULL;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (pdsc->viftype_status[vt].dl_id == dl_id)
+ return &pdsc->viftype_status[vt];
+ }
+
+ return NULL;
+}
+
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry)
+ return -ENOENT;
+
+ ctx->val.vbool = vt_entry->enabled;
+
+ return 0;
+}
+
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+ int err = 0;
+ int vf_id;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (vt_entry->enabled == ctx->val.vbool)
+ return 0;
+
+ vt_entry->enabled = ctx->val.vbool;
+ for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
+ struct pdsc *vf = pdsc->vfs[vf_id].vf;
+
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
+ }
+
+ return err;
+}
+
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (!pdsc->viftype_status[vt_entry->vif_id].supported)
+ return -ENODEV;
+
+ return 0;
+}
+
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+
+ return pdsc_firmware_update(pdsc, params->fw, extack);
+}
+
+static char *fw_slotnames[] = {
+ "fw.goldfw",
+ "fw.mainfwa",
+ "fw.mainfwb",
+};
+
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_GET_LIST,
+ };
+ struct pds_core_fw_list_info fw_list = {};
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+ char buf[32];
+ int listlen;
+ int err;
+ int i;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
+ if (!err)
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
+ for (i = 0; i < listlen; i++) {
+ if (i < ARRAY_SIZE(fw_slotnames))
+ strscpy(buf, fw_slotnames[i], sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "fw.slot_%d", i);
+ err = devlink_info_version_stored_put(req, buf,
+ fw_list.fw_names[i].fw_version);
+ if (err)
+ return err;
+ }
+
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ pdsc->dev_info.fw_version);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_type);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_rev);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (err)
+ return err;
+
+ return devlink_info_serial_number_put(req, pdsc->dev_info.serial_num);
+}
+
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&pdsc->config_lock);
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ devlink_fmsg_string_pair_put(fmsg, "Status", "dead");
+ else if (!pdsc_is_fw_good(pdsc))
+ devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy");
+ else
+ devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+ mutex_unlock(&pdsc->config_lock);
+
+ devlink_fmsg_u32_pair_put(fmsg, "State",
+ pdsc->fw_status & ~PDS_CORE_FW_STS_F_GENERATION);
+ devlink_fmsg_u32_pair_put(fmsg, "Generation", pdsc->fw_generation >> 4);
+ devlink_fmsg_u32_pair_put(fmsg, "Recoveries", pdsc->fw_recoveries);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
new file mode 100644
index 000000000000..fa626719e68d
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+
+/* The worst case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which seldom happens. Normal is about 30-35
+ * seconds. Since the driver can't tell if a CPLD update will happen, we
+ * set the timeout for the ugly case.
+ */
+#define PDSC_FW_INSTALL_TIMEOUT (25 * 60)
+#define PDSC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic log updates during fw file download */
+#define PDSC_FW_INTERVAL_FRACTION 32
+
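+/* Worked example (file size hypothetical): for a 32 MB firmware image the
+ * download loop emits a devlink status notification roughly every
+ * 32 MB / PDSC_FW_INTERVAL_FRACTION = 1 MB of data copied.
+ */
+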
+static int pdsc_devcmd_fw_download_locked(struct pdsc *pdsc, u64 addr,
+ u32 offset, u32 length)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_download.opcode = PDS_CORE_CMD_FW_DOWNLOAD,
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_fw_install(struct pdsc *pdsc)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_INSTALL_ASYNC
+ };
+ union pds_core_dev_comp comp;
+ int err;
+
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (err < 0)
+ return err;
+
+ return comp.fw_control.slot;
+}
+
+static int pdsc_devcmd_fw_activate(struct pdsc *pdsc,
+ enum pds_core_fw_slot slot)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_fw_status_long_wait(struct pdsc *pdsc,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ union pds_core_dev_comp comp;
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+ /* Poll the status of the long-running async install
+ * command. We get EAGAIN while the command is still
+ * running, else we get the final command status.
+ */
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
+ do {
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ msleep(20);
+ } while (time_before(jiffies, end_time) &&
+ (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(pdsc->dev, "DEV_CMD firmware wait %s timed out\n",
+ label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
+
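+/* The expected async sequence, as used by pdsc_firmware_update() below:
+ * start PDS_CORE_FW_INSTALL_ASYNC, then poll PDS_CORE_FW_INSTALL_STATUS
+ * via pdsc_fw_status_long_wait(); likewise PDS_CORE_FW_ACTIVATE_ASYNC is
+ * followed by polling PDS_CORE_FW_ACTIVATE_STATUS.
+ */
+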
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_sz, copy_sz, offset;
+ struct devlink *dl;
+ int next_interval;
+ u64 data_addr;
+ int err = 0;
+ int fw_slot;
+
+ dev_info(pdsc->dev, "Installing firmware\n");
+
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
+ dl = priv_to_devlink(pdsc);
+ devlink_flash_update_status_notify(dl, "Preparing to flash",
+ NULL, 0, 0);
+
+ buf_sz = sizeof(pdsc->cmd_regs->data);
+
+ dev_dbg(pdsc->dev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
+ data_addr = offsetof(struct pds_core_dev_cmd_regs, data);
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading",
+ NULL, offset,
+ fw->size);
+ next_interval = offset +
+ (fw->size / PDSC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&pdsc->devcmd_lock);
+ memcpy_toio(&pdsc->cmd_regs->data, fw->data + offset, copy_sz);
+ err = pdsc_devcmd_fw_download_locked(pdsc, data_addr,
+ offset, copy_sz);
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev,
+ "download failed offset 0x%x addr 0x%llx len 0x%x: %pe\n",
+ offset, data_addr, copy_sz, ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ PDSC_FW_INSTALL_TIMEOUT);
+
+ fw_slot = pdsc_devcmd_fw_install(pdsc);
+ if (fw_slot < 0) {
+ err = fw_slot;
+ dev_err(pdsc->dev, "install failed: %pe\n", ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Installing",
+ PDSC_FW_INSTALL_TIMEOUT,
+ PDS_CORE_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ PDSC_FW_SELECT_TIMEOUT);
+
+ err = pdsc_devcmd_fw_activate(pdsc, fw_slot);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Selecting",
+ PDSC_FW_SELECT_TIMEOUT,
+ PDS_CORE_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ dev_info(pdsc->dev, "Firmware update completed, slot %d\n", fw_slot);
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed",
+ NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done",
+ NULL, 0, 0);
+ return err;
+}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
new file mode 100644
index 000000000000..c7a2eff57632
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+
+#include <linux/pds/pds_common.h>
+
+#include "core.h"
+
+MODULE_DESCRIPTION(PDSC_DRV_DESCRIPTION);
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_LICENSE("GPL");
+
+/* Supported devices */
+static const struct pci_device_id pdsc_id_table[] = {
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_CORE_PF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_VDPA_VF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, pdsc_id_table);
+
+static void pdsc_wdtimer_cb(struct timer_list *t)
+{
+ struct pdsc *pdsc = timer_container_of(pdsc, t, wdtimer);
+
+ dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
+ mod_timer(&pdsc->wdtimer,
+ round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ queue_work(pdsc->wq, &pdsc->health_work);
+}
+
+static void pdsc_unmap_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bars = pdsc->bars;
+ unsigned int i;
+
+ pdsc->info_regs = NULL;
+ pdsc->cmd_regs = NULL;
+ pdsc->intr_status = NULL;
+ pdsc->intr_ctrl = NULL;
+
+ for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (bars[i].vaddr)
+ pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ bars[i].vaddr = NULL;
+ }
+}
+
+static int pdsc_map_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bar = pdsc->bars;
+ struct pci_dev *pdev = pdsc->pdev;
+ struct device *dev = pdsc->dev;
+ struct pdsc_dev_bar *bars;
+ unsigned int i, j;
+ int num_bars = 0;
+ int err;
+ u32 sig;
+
+ bars = pdsc->bars;
+
+ /* Since the PCI interface in the hardware is configurable,
+ * we need to poke into all the bars to find the set we're
+ * expecting.
+ */
+ for (i = 0, j = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+
+ bars[j].len = pci_resource_len(pdev, i);
+ bars[j].bus_addr = pci_resource_start(pdev, i);
+ bars[j].res_index = i;
+
+ /* only map the whole bar 0 */
+ if (j > 0) {
+ bars[j].vaddr = NULL;
+ } else {
+ bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
+ if (!bars[j].vaddr) {
+ dev_err(dev, "Cannot map BAR %d, aborting\n", i);
+ return -ENODEV;
+ }
+ }
+
+ j++;
+ }
+ num_bars = j;
+
+ /* BAR0: dev_cmd and interrupts */
+ if (num_bars < 1) {
+ dev_err(dev, "No bars found\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (bar->len < PDS_CORE_BAR0_SIZE) {
+ dev_err(dev, "Resource bar size %lu too small\n", bar->len);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->info_regs = bar->vaddr + PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET;
+ pdsc->cmd_regs = bar->vaddr + PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET;
+ pdsc->intr_status = bar->vaddr + PDS_CORE_BAR0_INTR_STATUS_OFFSET;
+ pdsc->intr_ctrl = bar->vaddr + PDS_CORE_BAR0_INTR_CTRL_OFFSET;
+
+ sig = ioread32(&pdsc->info_regs->signature);
+ if (sig != PDS_CORE_DEV_INFO_SIGNATURE) {
+ dev_err(dev, "Incompatible firmware signature %x", sig);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ /* BAR1: doorbells */
+ bar++;
+ if (num_bars < 2) {
+ dev_err(dev, "Doorbell bar missing\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->db_pages = bar->vaddr;
+ pdsc->phy_db_pages = bar->bus_addr;
+
+ return 0;
+
+err_out:
+ pdsc_unmap_bars(pdsc);
+ return err;
+}
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num)
+{
+ return pci_iomap_range(pdsc->pdev,
+ pdsc->bars[PDS_CORE_PCI_BAR_DBELL].res_index,
+ (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
+}
+
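+/* Example (page number hypothetical): pdsc_map_dbpage(pdsc, 16) maps one
+ * PAGE_SIZE window at byte offset 16 << PAGE_SHIFT into the doorbell BAR.
+ */
+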
+static int pdsc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ pdsc->vfs = kcalloc(num_vfs, sizeof(struct pdsc_vf),
+ GFP_KERNEL);
+ if (!pdsc->vfs)
+ return -ENOMEM;
+ pdsc->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %pe\n",
+ ERR_PTR(ret));
+ goto no_vfs;
+ }
+
+ return num_vfs;
+ }
+
+no_vfs:
+ pci_disable_sriov(pdev);
+
+ kfree(pdsc->vfs);
+ pdsc->vfs = NULL;
+ pdsc->num_vfs = 0;
+
+ return ret;
+}
+
+static int pdsc_init_vf(struct pdsc *vf)
+{
+ struct devlink *dl;
+ struct pdsc *pf;
+ int err;
+
+ pf = pdsc_get_pf_struct(vf->pdev);
+ if (IS_ERR_OR_NULL(pf))
+ return PTR_ERR(pf) ?: -1;
+
+ vf->vf_id = pci_iov_vf_id(vf->pdev);
+
+ dl = priv_to_devlink(vf);
+ devl_lock(dl);
+ devl_register(dl);
+ devl_unlock(dl);
+
+ pf->vfs[vf->vf_id].vf = vf;
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
+ if (err) {
+ devl_lock(dl);
+ devl_unregister(dl);
+ devl_unlock(dl);
+ }
+
+ return err;
+}
+
+static const struct devlink_health_reporter_ops pdsc_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = pdsc_fw_reporter_diagnose,
+};
+
+static const struct devlink_param pdsc_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_VNET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ pdsc_dl_enable_get,
+ pdsc_dl_enable_set,
+ pdsc_dl_enable_validate),
+};
+
+#define PDSC_WQ_NAME_LEN 24
+
+static int pdsc_init_pf(struct pdsc *pdsc)
+{
+ struct devlink_health_reporter *hr;
+ char wq_name[PDSC_WQ_NAME_LEN];
+ struct devlink *dl;
+ int err;
+
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ goto err_out_release_regions;
+
+ /* General workqueue and timer, but don't start timer yet */
+ snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
+ pdsc->wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
+
+ mutex_init(&pdsc->devcmd_lock);
+ mutex_init(&pdsc->config_lock);
+ spin_lock_init(&pdsc->adminq_lock);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
+ if (err) {
+ mutex_unlock(&pdsc->config_lock);
+ goto err_out_unmap_bars;
+ }
+
+ err = pdsc_start(pdsc);
+ if (err) {
+ mutex_unlock(&pdsc->config_lock);
+ goto err_out_teardown;
+ }
+
+ mutex_unlock(&pdsc->config_lock);
+
+ err = pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL, &pdsc->padev);
+ if (err)
+ goto err_out_stop;
+
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ err = devl_params_register(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ if (err) {
+ devl_unlock(dl);
+ dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
+ ERR_PTR(err));
+ goto err_out_del_dev;
+ }
+
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, pdsc);
+ if (IS_ERR(hr)) {
+ devl_unlock(dl);
+ dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
+ err = PTR_ERR(hr);
+ goto err_out_unreg_params;
+ }
+ pdsc->fw_reporter = hr;
+
+ devl_register(dl);
+ devl_unlock(dl);
+
+ /* Lastly, start the health check timer */
+ mod_timer(&pdsc->wdtimer, round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ return 0;
+
+err_out_unreg_params:
+ devlink_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+err_out_del_dev:
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
+err_out_stop:
+ pdsc_stop(pdsc);
+err_out_teardown:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+err_out_unmap_bars:
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+ pci_free_irq_vectors(pdsc->pdev);
+ pdsc_unmap_bars(pdsc);
+err_out_release_regions:
+ pci_release_regions(pdsc->pdev);
+
+ return err;
+}
+
+static const struct devlink_ops pdsc_dl_ops = {
+ .info_get = pdsc_dl_info_get,
+ .flash_update = pdsc_dl_flash_update,
+};
+
+static const struct devlink_ops pdsc_dl_vf_ops = {
+};
+
+static DEFINE_IDA(pdsc_ida);
+
+static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ const struct devlink_ops *ops;
+ struct devlink *dl;
+ struct pdsc *pdsc;
+ bool is_pf;
+ int err;
+
+ is_pf = !pdev->is_virtfn;
+ ops = is_pf ? &pdsc_dl_ops : &pdsc_dl_vf_ops;
+ dl = devlink_alloc(ops, sizeof(struct pdsc), dev);
+ if (!dl)
+ return -ENOMEM;
+ pdsc = devlink_priv(dl);
+
+ pdsc->pdev = pdev;
+ pdsc->dev = &pdev->dev;
+ set_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ pci_set_drvdata(pdev, pdsc);
+ pdsc_debugfs_add_dev(pdsc);
+
+ err = ida_alloc(&pdsc_ida, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(pdsc->dev, "%s: id alloc failed: %pe\n",
+ __func__, ERR_PTR(err));
+ goto err_out_free_devlink;
+ }
+ pdsc->uid = err;
+
+ /* Query the system for the device's DMA addressing limitation. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(PDS_CORE_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting: %pe\n",
+ ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+ pci_set_master(pdev);
+
+ if (is_pf)
+ err = pdsc_init_pf(pdsc);
+ else
+ err = pdsc_init_vf(pdsc);
+ if (err) {
+ dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err));
+ goto err_out_disable_device;
+ }
+
+ clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ return 0;
+
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out_free_ida:
+ ida_free(&pdsc_ida, pdsc->uid);
+err_out_free_devlink:
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+
+ return err;
+}
+
+static void pdsc_remove(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct devlink *dl;
+
+ /* Unhook the registrations first to be sure there
+ * are no requests while we're stopping.
+ */
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ devl_unregister(dl);
+ if (!pdev->is_virtfn) {
+ if (pdsc->fw_reporter) {
+ devl_health_reporter_destroy(pdsc->fw_reporter);
+ pdsc->fw_reporter = NULL;
+ }
+ devl_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ }
+ devl_unlock(dl);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf)) {
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
+ pf->vfs[pdsc->vf_id].vf = NULL;
+ }
+ } else {
+ /* Remove the VFs and their aux_bus connections before other
+ * cleanup so that the clients can use the AdminQ to cleanly
+ * shut themselves down.
+ */
+ pdsc_sriov_configure(pdev, 0);
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
+
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_STOPPING_DRIVER, &pdsc->state);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+ mutex_unlock(&pdsc->config_lock);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ }
+
+ pci_disable_device(pdev);
+
+ ida_free(&pdsc_ida, pdsc->uid);
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+}
+
+static void pdsc_stop_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->health_work.func)
+ cancel_work_sync(&pdsc->health_work);
+}
+
+static void pdsc_restart_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ mod_timer(&pdsc->wdtimer, jiffies + 1);
+}
+
+static void pdsc_reset_prepare(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ pdsc_stop_health_thread(pdsc);
+ pdsc_fw_down(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_del(pdsc, pdsc, &pdsc->padev);
+ }
+
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static void pdsc_reset_done(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ return;
+ }
+ pci_set_master(pdev);
+
+ if (!pdev->is_virtfn) {
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ return;
+ }
+
+ pdsc_fw_up(pdsc);
+ pdsc_restart_health_thread(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
+ } else {
+ pdsc_auxbus_dev_add(pdsc, pdsc, PDS_DEV_TYPE_FWCTL,
+ &pdsc->padev);
+ }
+}
+
+static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ pdsc_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void pdsc_pci_error_resume(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ pci_reset_function_locked(pdev);
+}
+
+static const struct pci_error_handlers pdsc_err_handler = {
+ /* FLR handling */
+ .reset_prepare = pdsc_reset_prepare,
+ .reset_done = pdsc_reset_done,
+
+ /* AER handling */
+ .error_detected = pdsc_pci_error_detected,
+ .resume = pdsc_pci_error_resume,
+};
+
+static struct pci_driver pdsc_driver = {
+ .name = PDS_CORE_DRV_NAME,
+ .id_table = pdsc_id_table,
+ .probe = pdsc_probe,
+ .remove = pdsc_remove,
+ .sriov_configure = pdsc_sriov_configure,
+ .err_handler = &pdsc_err_handler,
+};
+
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
+{
+ return pci_iov_get_pf_drvdata(vf_pdev, &pdsc_driver);
+}
+EXPORT_SYMBOL_GPL(pdsc_get_pf_struct);
+
+static int __init pdsc_init_module(void)
+{
+ if (strcmp(KBUILD_MODNAME, PDS_CORE_DRV_NAME))
+ return -EINVAL;
+
+ pdsc_debugfs_create();
+ return pci_register_driver(&pdsc_driver);
+}
+
+static void __exit pdsc_cleanup_module(void)
+{
+ pci_unregister_driver(&pdsc_driver);
+ pdsc_debugfs_destroy();
+}
+
+module_init(pdsc_init_module);
+module_exit(pdsc_cleanup_module);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..c60df4a21158 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -74,6 +74,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
#define DPRINTK(n,a) \
@@ -341,7 +342,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +797,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..0b273327f5a6 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -92,7 +92,7 @@ static char lancestr[] = "LANCE";
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -963,7 +963,7 @@ static int lance_close(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- del_timer_sync(&lp->multicast_timer);
+ timer_delete_sync(&lp->multicast_timer);
STOP_LANCE(lp);
@@ -1246,7 +1246,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
@@ -1487,7 +1487,7 @@ static int sunlance_sbus_probe(struct platform_device *op)
return err;
}
-static int sunlance_sbus_remove(struct platform_device *op)
+static void sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = platform_get_drvdata(op);
struct net_device *net_dev = lp->dev;
@@ -1497,8 +1497,6 @@ static int sunlance_sbus_remove(struct platform_device *op)
lance_free_hwresources(lp);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id sunlance_sbus_match[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5992f7fd4d9b 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
- xgbe-platform.o
+ xgbe-platform.o xgbe-selftest.o
amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b2cd3bdba9f8..62b01de93db4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -332,7 +223,15 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
-
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
+#define MAC_PPSCR 0x0d70
+#define MAC_PPS0_TTSR 0x0d80
+#define MAC_PPS0_TTNSR 0x0d84
+#define MAC_PPS0_INTERVAL 0x0d88
+#define MAC_PPS0_WIDTH 0x0d8C
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -340,6 +239,18 @@
#define MAC_RQC2_INC 4
#define MAC_RQC2_Q_PER_REG 4
+/* PPS helpers */
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TTSR(x) ((MAC_PPS0_TTSR) + ((x) * 0x10))
+#define MAC_PPSx_TTNSR(x) ((MAC_PPS0_TTNSR) + ((x) * 0x10))
+#define MAC_PPSx_INTERVAL(x) ((MAC_PPS0_INTERVAL) + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) ((MAC_PPS0_WIDTH) + ((x) * 0x10))
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define XGBE_PPSCMD_STOP 0x5
+#define XGBE_PPSCMD_START 0x2
+#define XGBE_PPSTARGET_PULSE 0x2
+
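/* Editor's sketch, not part of the patch: issuing a start command for
 * PPS output @idx with the helpers above. The one-byte-per-output
 * layout of MAC_PPSCR is an assumption inferred from PPS_MINIDX()/
 * PPS_MAXIDX(); XGMAC_IOREAD/XGMAC_IOWRITE are the existing register
 * accessors from this header. */
static inline void xgbe_pps_cmd_start_example(struct xgbe_prv_data *pdata,
					      unsigned int idx)
{
	u32 val = XGMAC_IOREAD(pdata, MAC_PPSCR);

	/* clear this output's control byte, then place the start command
	 * in its low bits */
	val &= ~GENMASK(PPS_MAXIDX(idx), PPS_MINIDX(idx));
	val |= XGBE_PPSCMD_START << PPS_MINIDX(idx);
	XGMAC_IOWRITE(pdata, MAC_PPSCR, val);
}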
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
@@ -473,6 +384,10 @@
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_GPSLCE_INDEX 6
+#define MAC_RCR_GPSLCE_WIDTH 1
+#define MAC_RCR_WD_INDEX 7
+#define MAC_RCR_WD_WIDTH 1
#define MAC_RCR_HDSMS_INDEX 12
#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
@@ -483,6 +398,8 @@
#define MAC_RCR_LM_WIDTH 1
#define MAC_RCR_RE_INDEX 0
#define MAC_RCR_RE_WIDTH 1
+#define MAC_RCR_GPSL_INDEX 16
+#define MAC_RCR_GPSL_WIDTH 14
#define MAC_RFCR_PFCE_INDEX 8
#define MAC_RFCR_PFCE_WIDTH 1
#define MAC_RFCR_RFE_INDEX 0
@@ -521,6 +438,8 @@
#define MAC_TCR_VNE_WIDTH 1
#define MAC_TCR_VNM_INDEX 25
#define MAC_TCR_VNM_WIDTH 1
+#define MAC_TCR_JD_INDEX 16
+#define MAC_TCR_JD_WIDTH 1
#define MAC_TIR_TNID_INDEX 0
#define MAC_TIR_TNID_WIDTH 16
#define MAC_TSCR_AV8021ASMEN_INDEX 28
@@ -529,6 +448,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -557,6 +478,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
@@ -587,8 +512,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_PPSx_TTNSR_TRGTBUSY0_INDEX 31
+#define MAC_PPSx_TTNSR_TRGTBUSY0_WIDTH 1
/* MMC register offsets */
#define MMC_CR 0x0800
#define MMC_RISR 0x0804
#define MMC_TISR 0x0808
@@ -898,6 +825,13 @@
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064
+#define PCS_V2_YC_WINDOW_DEF 0x18060
+#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1030,8 +964,8 @@
#define XP_PROP_0_PORT_ID_WIDTH 8
#define XP_PROP_0_PORT_MODE_INDEX 8
#define XP_PROP_0_PORT_MODE_WIDTH 4
-#define XP_PROP_0_PORT_SPEEDS_INDEX 23
-#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 22
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 5
#define XP_PROP_1_MAX_RX_DMA_INDEX 24
#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
@@ -1283,6 +1217,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS 0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS 0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1331,6 +1281,10 @@
#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
#endif
+#ifndef MDIO_VEND2_PMA_MISC_CTRL0
+#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
@@ -1351,6 +1305,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
+
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)
@@ -1389,6 +1345,32 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD |\
+ XGBE_PMA_CFF_UPDT0_VLD | \
+ XGBE_PMA_CFF_UPDT1_VLD)
+
+#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
+#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
+#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1689,20 +1671,21 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
- * using MDIO. Different from above because of the use of standardized
- * Linux include values. No shifting is performed with the bit
- * operations, everything works on mask values.
+ * using MDIO.
*/
+
+#define XGBE_ADDR_C45 BIT(30)
+
#define XMDIO_READ(_pdata, _mmd, _reg) \
((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
(XMDIO_READ((_pdata), _mmd, _reg) & _mask)
#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
do { \
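/* Editor's usage sketch for the rewritten macros (not part of the
 * patch). With the driver-private XGBE_ADDR_C45 flag replacing
 * MII_ADDR_C45, callers look unchanged; the register constants below
 * are the standard ones from <linux/mdio.h>, and <linux/delay.h> is
 * assumed to be available. */
static inline void xgbe_pcs_soft_reset_example(struct xgbe_prv_data *pdata)
{
	unsigned int count = 50;

	/* request a PCS soft reset, then poll for self-clear */
	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PCS, MDIO_CTRL1,
			 MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);

	while (--count && XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_CTRL1,
					  MDIO_CTRL1_RESET))
		usleep_range(500, 600);
}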
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 895d35639129..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
@@ -230,7 +121,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ "cap=%d, en=%#x, mbc=%d, delay=%d\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
/* Check PFC for supported number of traffic classes */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b0a6c96b6ef4..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
@@ -505,21 +396,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- char *buf;
-
- if (!pdata->xgbe_debugfs)
- return;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
- goto out;
-
- debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
-
-out:
- kfree(buf);
+ debugfs_change_name(pdata->xgbe_debugfs,
+ "amd-xgbe-%s", pdata->netdev->name);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
@@ -373,8 +264,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 3936543a74d8..b646ae575e6a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -318,6 +211,20 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
}
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+ pdata->sph = true;
+}
+
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+ pdata->sph = false;
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
@@ -524,19 +431,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+ /* From MAC ver 30H, the TFCR is per priority instead of per queue */
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+ return max_q_count;
+ else
+ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Clear MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +469,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
struct ieee_pfc *pfc = pdata->pfc;
struct ieee_ets *ets = pdata->ets;
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +494,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
}
/* Set MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -807,6 +721,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
unsigned int ss;
switch (speed) {
+ case SPEED_10:
+ ss = 0x07;
+ break;
case SPEED_1000:
ss = 0x03;
break;
@@ -1140,18 +1057,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1162,8 +1080,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
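/* Editor's worked example of the index/offset split (window geometry
 * assumed: 16 KB mmio window, xpcs_window_mask = 0x3fff):
 *
 *   mmd_reg     = MDIO_CTRL2 (0x0007) in MMD 1
 *   mmd_address = (1 << 16) | 0x0007              = 0x10007
 *   mmd_address <<= 1                             = 0x2000e
 *   index       = 0x2000e & ~0x3fff               = 0x20000  (address phase)
 *   offset      = xpcs_window + (0x2000e & 0x3fff)
 *               = xpcs_window + 0xe               (data phase)
 */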
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+ int mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to read data\n");
+ return;
+ }
+
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1179,23 +1187,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1210,10 +1204,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1238,10 +1229,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1268,6 +1256,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1278,17 +1269,29 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
}
}
-static unsigned int xgbe_create_mdio_sca(int port, int reg)
+static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
{
- unsigned int mdio_sca, da;
+ unsigned int mdio_sca;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
+
+ return mdio_sca;
+}
- da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
+{
+ unsigned int mdio_sca;
mdio_sca = 0;
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
@@ -1298,14 +1301,13 @@ static unsigned int xgbe_create_mdio_sca(int port, int reg)
return mdio_sca;
}
-static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1322,14 +1324,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return 0;
}
-static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1345,6 +1366,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
+static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
+static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
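/* Editor's usage sketch for the new Clause 22/45 split (not part of
 * the patch): read the high word of the PMA/PMD device identifier of
 * the PHY at address @addr. The constants are the generic ones from
 * <linux/mdio.h>. */
static int xgbe_phy_id_example(struct xgbe_prv_data *pdata, int addr)
{
	return xgbe_read_ext_mii_regs_c45(pdata, addr, MDIO_MMD_PMAPMD,
					  MDIO_DEVID1);
}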
+
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
enum xgbe_mdio_mode mode)
{
@@ -1519,125 +1560,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -2811,9 +2733,19 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
+ XGMAC_GIANT_PACKET_MTU);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
+ } else {
+ val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+ }
}
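The rework above splits packet-size handling into three regimes instead of a single jumbo-enable bit. A compact summary (the threshold macros are the driver's own; WD and JD follow the usual XGMAC semantics of disabling the Rx watchdog and Tx jabber checks):

/* mtu <= XGMAC_STD_PACKET_MTU   : JE=0, GPSLCE=0  (standard frames)
 * mtu <= XGMAC_JUMBO_PACKET_MTU : JE=1, GPSLCE=0  (jumbo frames)
 * mtu >  XGMAC_JUMBO_PACKET_MTU : GPSLCE=1 with GPSL as the giant-packet
 *                                 limit, WD=1/JD=1 so oversized frames are
 *                                 not truncated by the watchdog/jabber logic
 */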
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
@@ -3488,8 +3420,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3558,8 +3494,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_speed = xgbe_set_speed;
hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
- hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
- hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+ hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
+ hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
+ hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
+ hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
hw_if->set_gpio = xgbe_set_gpio;
hw_if->clr_gpio = xgbe_clr_gpio;
@@ -3616,13 +3554,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
@@ -3643,5 +3574,26 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+	/* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
+
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Enable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1);
+
+ /* Wait for loopback to stabilize */
+ usleep_range(10, 15);
+
+ return 0;
+}
+
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ /* Disable MAC loopback mode */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0);
+}
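These loopback helpers pair naturally with the ethtool self-test hooks added later in this series. A hypothetical caller, assuming a harness that transmits and verifies a known frame (xgbe_loopback_test and the frame-check step are illustrative, not part of this patch):

static int xgbe_loopback_test(struct xgbe_prv_data *pdata)
{
	int ret;

	ret = xgbe_enable_mac_loopback(pdata);
	if (ret)
		return ret;

	/* transmit a known frame and verify it comes back on Rx */

	xgbe_disable_mac_loopback(pdata);

	return 0;
}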
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 30d24d19f40d..3ddd896d6987 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -403,9 +294,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
unsigned int ecc_isr;
bool stop = false;
@@ -465,21 +356,22 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_ecc);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->ecc_bh_work);
else
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
return IRQ_HANDLED;
}
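The hunks above convert the device/ECC tasklets to BH workqueue items: tasklets are deprecated, and work queued on system_bh_wq runs in the same softirq-like context. The general shape of the conversion, as a minimal self-contained sketch (demo_data is illustrative):

#include <linux/workqueue.h>

struct demo_data {
	struct work_struct bh_work;
};

static void demo_bh_work(struct work_struct *work)
{
	/* from_work() is container_of() keyed on the work member */
	struct demo_data *dd = from_work(dd, work, bh_work);

	/* bottom-half processing for dd runs here */
}

/* setup:    INIT_WORK(&dd->bh_work, demo_bh_work);
 * hard IRQ: queue_work(system_bh_wq, &dd->bh_work);
 */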
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
+ unsigned int mac_isr, mac_tssr, mac_mdioisr;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
+ bool per_ch_irq, ti, ri, rbu, fbe;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr, mac_mdioisr;
+ struct xgbe_channel *channel;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -493,43 +385,73 @@ static void xgbe_isr_task(struct tasklet_struct *t)
netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
+ bool schedule_napi = false;
+ struct napi_struct *napi;
+
if (!(dma_isr & (1 << i)))
continue;
channel = pdata->channel[i];
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+
+ /* Precompute flags once */
+ ti = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI);
+ ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI);
+ rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU);
+ fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE);
+
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
i, dma_ch_isr);
- /* The TI or RI interrupt bits may still be set even if using
- * per channel DMA interrupts. Check to be sure those are not
- * enabled before using the private data napi structure.
+ per_ch_irq = pdata->per_channel_irq;
+
+ /*
+ * Decide which NAPI to use and whether to schedule:
+ * - When not using per-channel IRQs: schedule on global NAPI
+ * if TI or RI are set.
+ * - RBU should also trigger NAPI (either per-channel or global)
+ * to allow refill.
*/
- if (!pdata->per_channel_irq &&
- (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
- XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- if (napi_schedule_prep(&pdata->napi)) {
- /* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
+ if (!per_ch_irq && (ti || ri))
+ schedule_napi = true;
- /* Turn on polling */
- __napi_schedule(&pdata->napi);
+ if (rbu) {
+ schedule_napi = true;
+ pdata->ext_stats.rx_buffer_unavailable++;
+ }
+
+ napi = per_ch_irq ? &channel->napi : &pdata->napi;
+
+ if (schedule_napi && napi_schedule_prep(napi)) {
+ /* Disable interrupts appropriately before polling */
+ if (per_ch_irq) {
+ if (pdata->channel_irq_mode)
+ xgbe_disable_rx_tx_int(pdata, channel);
+ else
+ disable_irq_nosync(channel->dma_irq);
+ } else {
+ xgbe_disable_rx_tx_ints(pdata);
}
+
+ /* Turn on polling */
+ __napi_schedule(napi);
} else {
- /* Don't clear Rx/Tx status if doing per channel DMA
- * interrupts, these will be cleared by the ISR for
- * per channel DMA interrupts.
+ /*
+ * Don't clear Rx/Tx status if doing per-channel DMA
+ * interrupts; those bits will be serviced/cleared by
+ * the per-channel ISR/NAPI. In non-per-channel mode
+ * when we're not scheduling NAPI here, ensure we don't
+ * accidentally clear TI/RI in HW: zero them in the
+ * local copy so that the eventual write-back does not
+ * clear TI/RI.
*/
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
}
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
- pdata->ext_stats.rx_buffer_unavailable++;
-
/* Restart the device on a Fatal Bus Error */
- if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ if (fbe)
schedule_work(&pdata->restart_work);
/* Clear interrupt signals */
@@ -557,7 +479,7 @@ static void xgbe_isr_task(struct tasklet_struct *t)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -582,7 +504,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +526,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_dev);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->dev_bh_work);
else
- xgbe_isr_task(&pdata->tasklet_dev);
+ xgbe_isr_bh_work(&pdata->dev_bh_work);
return IRQ_HANDLED;
}
@@ -643,7 +565,8 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -681,11 +604,26 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
+ service_timer);
+ struct xgbe_channel *channel;
+ unsigned int i;
queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
+
+ if (!pdata->tx_usecs)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring || channel->tx_timer_active)
+ break;
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
}
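Since tx_timer is a software timer, its effective resolution is one jiffy, and usecs_to_jiffies() rounds up. For example (assuming HZ=250, so one jiffy is 4000 usec):

/* pdata->tx_usecs = 8000  -> usecs_to_jiffies(8000)  = 2 ticks
 * pdata->tx_usecs = 10000 -> usecs_to_jiffies(10000) = 3 ticks (rounded up)
 */

This is why the ethtool path later rounds tx-usecs to the jiffy granularity before storing it.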
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
@@ -714,14 +652,16 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- del_timer_sync(&pdata->service_timer);
+ timer_delete_sync(&pdata->service_timer);
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
if (!channel->tx_ring)
break;
- del_timer_sync(&channel->tx_timer);
+ /* Deactivate the Tx timer */
+ timer_delete_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -782,6 +722,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Sanity check and warn if hardware reports more than supported */
+ if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u PPS outputs, limiting to %u\n",
+ hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
+ hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
+ }
+
+ if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u aux snapshot inputs, limiting to %u\n",
+ hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
+ hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
+ }
+
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
@@ -950,14 +905,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
@@ -991,8 +946,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
- tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+ INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+ INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
@@ -1062,6 +1017,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ cancel_work_sync(&pdata->dev_bh_work);
+ cancel_work_sync(&pdata->ecc_bh_work);
+
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
@@ -1153,7 +1111,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);
@@ -1333,6 +1290,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
udp_tunnel_nic_reset_ntf(netdev);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_txrx;
+
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
@@ -1342,6 +1304,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
return 0;
+err_txrx:
+ hw_if->disable_rx(pdata);
+ hw_if->disable_tx(pdata);
+
err_irqs:
xgbe_free_irqs(pdata);
@@ -1459,202 +1425,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags)
- return -EINVAL;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1674,12 +1444,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
@@ -1846,11 +1614,6 @@ static int xgbe_open(struct net_device *netdev)
goto err_dev_wq;
}
- /* Reset the phy settings */
- ret = xgbe_phy_reset(pdata);
- if (ret)
- goto err_an_wq;
-
/* Enable the clocks */
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
@@ -1869,6 +1632,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2023,27 +1789,6 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
-static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
- break;
-
- case SIOCSHWTSTAMP:
- ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -2056,7 +1801,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xgbe_restart_dev(pdata);
@@ -2243,10 +1988,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
@@ -2282,7 +2034,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_set_rx_mode = xgbe_set_rx_mode,
.ndo_set_mac_address = xgbe_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = xgbe_ioctl,
.ndo_change_mtu = xgbe_change_mtu,
.ndo_tx_timeout = xgbe_tx_timeout,
.ndo_get_stats64 = xgbe_get_stats64,
@@ -2295,6 +2046,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
.ndo_features_check = xgbe_features_check,
+ .ndo_hwtstamp_get = xgbe_get_hwtstamp_settings,
+ .ndo_hwtstamp_set = xgbe_set_hwtstamp_settings,
};
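With the legacy ioctl handler gone, the core routes SIOCGHWTSTAMP/SIOCSHWTSTAMP to the new callbacks itself, passing a kernel-space config instead of a raw ifreq (hence the dropped copy_to_user/copy_from_user above). The prototypes the core expects, from include/linux/netdevice.h:

/* int (*ndo_hwtstamp_get)(struct net_device *dev,
 *			    struct kernel_hwtstamp_config *kernel_config);
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *			    struct kernel_hwtstamp_config *kernel_config,
 *			    struct netlink_ext_ack *extack);
 */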
const struct net_device_ops *xgbe_get_netdev_ops(void)
@@ -2553,6 +2306,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2582,8 +2343,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
@@ -2622,12 +2385,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
if (XGMAC_GET_BITS(packet->attributes,
@@ -2775,7 +2534,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 94879cf8b420..0d19b09497a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
@@ -194,24 +85,23 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
int i;
switch (stringset) {
+ case ETH_SS_TEST:
+ xgbe_selftest_get_strings(pdata, data);
+ break;
case ETH_SS_STATS:
- for (i = 0; i < XGBE_STATS_COUNT; i++) {
- memcpy(data, xgbe_gstring_stats[i].stat_string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < XGBE_STATS_COUNT; i++)
+ ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
+
for (i = 0; i < pdata->tx_ring_count; i++) {
- sprintf(data, "txq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "txq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "txq_%u_packets", i);
+ ethtool_sprintf(&data, "txq_%u_bytes", i);
}
+
for (i = 0; i < pdata->rx_ring_count; i++) {
- sprintf(data, "rxq_%u_packets", i);
- data += ETH_GSTRING_LEN;
- sprintf(data, "rxq_%u_bytes", i);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "rxq_%u_packets", i);
+ ethtool_sprintf(&data, "rxq_%u_bytes", i);
}
+
break;
}
}
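ethtool_puts() and ethtool_sprintf() replace the open-coded memcpy/sprintf pairs; both take a u8 ** and advance it by ETH_GSTRING_LEN internally, removing the easy-to-forget pointer bump:

/* void ethtool_puts(u8 **data, const char *str);
 * void ethtool_sprintf(u8 **data, const char *fmt, ...);
 */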
@@ -244,6 +134,9 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
int ret;
switch (stringset) {
+ case ETH_SS_TEST:
+ ret = xgbe_selftest_get_count(pdata);
+ break;
case ETH_SS_STATS:
ret = XGBE_STATS_COUNT +
(pdata->tx_ring_count * 2) +
@@ -314,10 +207,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = pdata->phy.address;
- cmd->base.autoneg = pdata->phy.autoneg;
- cmd->base.speed = pdata->phy.speed;
- cmd->base.duplex = pdata->phy.duplex;
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.autoneg = pdata->phy.autoneg;
cmd->base.port = PORT_NONE;
XGBE_LM_COPY(cmd, supported, lks, supported);
@@ -402,8 +300,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
@@ -437,6 +335,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
return 0;
@@ -450,7 +349,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
+ unsigned int tx_frames, tx_usecs;
+ unsigned int jiffy_us = jiffies_to_usecs(1);
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -472,20 +372,42 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
+ if (!tx_usecs) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs must not be 0");
+ return -EINVAL;
+ }
+ if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
+ XGMAC_MAX_COAL_TX_TICK);
+ return -EINVAL;
+ }
if (tx_frames > pdata->tx_desc_count) {
netdev_err(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
+	/* Round tx-usecs down to the nearest multiple of the jiffy granularity */
+ if (tx_usecs % jiffy_us) {
+ tx_usecs = rounddown(tx_usecs, jiffy_us);
+ if (!tx_usecs)
+ tx_usecs = jiffy_us;
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
+ tx_usecs, jiffy_us);
+ }
+
pdata->rx_riwt = rx_riwt;
pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
+ pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
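A worked example of the rounding above, assuming HZ=250 (jiffy_us = 4000):

/* requested tx-usecs = 10000 -> rounddown(10000, 4000) = 8000
 * requested tx-usecs = 2000  -> rounddown(2000, 4000)  = 0, clamped to 4000
 */

In both cases the extack message reports the value that was actually applied.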
@@ -522,47 +444,48 @@ static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
return ARRAY_SIZE(pdata->rss_table);
}
-static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int xgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
- MAC_RSSDR, DMCH);
+ rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
}
- if (key)
- memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+ if (rxfh->key)
+ memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int xgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
+ int ret;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
}
- if (indir) {
- ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (rxfh->indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
if (ret)
return ret;
}
- if (key) {
- ret = hw_if->set_rss_hash_key(pdata, key);
+ if (rxfh->key) {
+ ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
if (ret)
return ret;
}
@@ -571,21 +494,17 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static int xgbe_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (pdata->ptp_clock)
ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
@@ -619,8 +538,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
-static void xgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+static void
+xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -631,7 +553,9 @@ static void xgbe_get_ringparam(struct net_device *netdev,
}
static int xgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int rx, tx;
@@ -815,7 +739,7 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
@@ -842,6 +766,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.set_ringparam = xgbe_set_ringparam,
.get_channels = xgbe_get_channels,
.set_channels = xgbe_set_channels,
+ .self_test = xgbe_selftest_run,
};
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..0127988e10be
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+	/* Issue the command to update the system time value */
+ XGMAC_IOWRITE(pdata, MAC_TSCR,
+ XGMAC_IOREAD(pdata, MAC_TSCR) |
+ (1 << MAC_TSCR_TSUPDT_INDEX));
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+	if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
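These open-coded udelay() poll loops could also be written with the iopoll helpers, which bound the wait by time rather than iteration count. A sketch for the addend wait, assuming the XGMAC_* macros expand cleanly inside the helper (5 us per poll, 50 ms total, matching the 10000 x 5 us loop above; requires <linux/iopoll.h>):

	u32 tscr;
	int ret;

	ret = read_poll_timeout_atomic(XGMAC_IOREAD, tscr,
				       !XGMAC_GET_BITS(tscr, MAC_TSCR, TSADDREG),
				       5, 50000, false, pdata, MAC_TSCR);
	if (ret)
		netdev_err(pdata->netdev,
			   "timed out updating timestamp addend register\n");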
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
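With TSCTRLSSR (digital rollover) enabled, the Rx context descriptor carries whole seconds in desc1 and nanoseconds in desc0, so the assembly above is simply sec * NSEC_PER_SEC + nsec. For example:

/* desc1 = 5, desc0 = 123456
 *   nsec = 5 * NSEC_PER_SEC + 123456 = 5000123456 ns
 * (the all-ones value is the hardware's "no timestamp" marker and is skipped)
 */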
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+	unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ *config = pdata->tstamp_config;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int mac_tscr = 0;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ pdata->tstamp_config = *config;
+
+ return 0;
+}
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+ /* Register Settings to be done based on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+	dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
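
To make the addend formula above concrete, here is a worked example with
illustrative frequencies (the real values come from the
XGBE_*_PTP_ACT_CLK_FREQ constants and ptpclk_rate; the numbers below are
assumptions):

/* Suppose the SSINC-derived PTP clock is 50 MHz and the reference
 * clock (ptpclk_rate) is 125 MHz:
 *
 *   addend = (2^32 * 50,000,000) / 125,000,000 = 0x66666666
 *
 * The fine-update logic adds this value to a 32-bit accumulator on
 * every reference-clock tick and advances the sub-second counter on
 * overflow, effectively dividing the reference down to the nominal
 * PTP clock rate.
 */
u64 dividend = (u64)50000000 << 32;
u32 addend = div_u64(dividend, 125000000);	/* == 0x66666666 */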
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 22d4fc547a0a..65eb7b577b65 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -179,7 +70,7 @@ static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
{
- unsigned int ret;
+ int ret;
ret = xgbe_i2c_set_enable(pdata, false);
if (ret) {
@@ -274,9 +165,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -321,10 +212,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_i2c);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->i2c_bh_work);
else
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -369,7 +260,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -447,8 +338,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
xgbe_i2c_disable(pdata);
xgbe_i2c_clear_all_interrupts(pdata);
- if (pdata->dev_irq != pdata->i2c_irq)
+ if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+ cancel_work_sync(&pdata->i2c_bh_work);
+ }
}
static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
@@ -462,7 +355,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+ INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
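
The I2C changes above follow the same tasklet-to-BH-workqueue conversion
applied throughout this series. Distilled to its essentials (a sketch with a
hypothetical driver struct; from_work() and system_bh_wq are the real kernel
interfaces the patch uses):

#include <linux/workqueue.h>

struct foo_priv {
	struct work_struct bh_work;
	/* ... */
};

static void foo_bh_work(struct work_struct *work)
{
	struct foo_priv *priv = from_work(priv, work, bh_work);

	/* bottom-half processing formerly done in the tasklet */
}

/* setup:    INIT_WORK(&priv->bh_work, foo_bh_work);
 * from IRQ: queue_work(system_bh_wq, &priv->bh_work);
 * teardown: cancel_work_sync(&priv->bh_work);
 */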
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..d1f0419edb23 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -384,7 +275,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->min_mtu = 0;
- netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+ netdev->max_mtu = XGMAC_GIANT_PACKET_MTU - XGBE_ETH_FRAME_HDR;
/* Use default watchdog timeout */
netdev->watchdog_timeo = 0;
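
The new cap derives the MTU from the giant-packet frame limit minus the
Ethernet framing overhead. With illustrative constants (the real values are
defined in the driver headers and are not shown in this hunk):

/* Assumed, for illustration only:
 *   XGMAC_GIANT_PACKET_MTU  ~ largest frame the MAC accepts, e.g. 16384
 *   XGBE_ETH_FRAME_HDR      ~ header + VLAN + FCS overhead, e.g. 22
 *
 * max_mtu = 16384 - 22 = 16362, i.e. the MTU counts the L2 payload
 * (the IP packet) rather than the full on-wire frame.
 */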
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4e97b4869522..7675bb98f029 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
@@ -274,6 +165,15 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
}
+static void xgbe_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 10M speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_10);
+}
+
static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
/* Set MAC to 1G speed */
@@ -306,6 +206,9 @@ static void xgbe_change_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_KR:
xgbe_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_sgmii_100_mode(pdata);
break;
@@ -363,6 +266,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+ reg |= XGBE_VEND2_MAC_AUTO_SW;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -496,6 +403,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
reg |= XGBE_KR_TRAINING_ENABLE;
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ pdata->kr_start_time = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@@ -632,6 +540,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
xgbe_switch_mode(pdata);
+ pdata->an_result = XGBE_AN_READY;
+
xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
@@ -688,9 +598,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -712,17 +622,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_an);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->an_bh_work);
else
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
@@ -988,6 +898,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1074,6 +989,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
static const char *xgbe_phy_speed_string(int speed)
{
switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
case SPEED_100:
return "100Mbps";
case SPEED_1000:
@@ -1161,6 +1078,7 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
case XGBE_MODE_KX_1000:
case XGBE_MODE_KX_2500:
case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
case XGBE_MODE_X:
@@ -1175,7 +1093,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
- xgbe_set_mode(pdata, mode);
+	/* Force the mode change for SFI in fixed PHY configurations.
+	 * Fixed PHY configs need the PLL enabled while the mode is set.
+	 * When no SFP module is connected at boot, the driver assumes
+	 * AN is on and attempts auto-negotiation. If the SFP that is
+	 * later connected comes up in a fixed PHY config, the link
+	 * cannot come up because the PLL is not enabled when the
+	 * initial mode-set command is issued. Forcing the mode change
+	 * for SFI in fixed PHY configuration avoids this.
+	 */
+ if (mode == XGBE_MODE_SFI)
+ xgbe_change_mode(pdata, mode);
+ else
+ xgbe_set_mode(pdata, mode);
return 0;
}
@@ -1222,6 +1152,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_10);
} else {
enable_irq(pdata->an_irq);
ret = -EINVAL;
@@ -1275,9 +1207,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
+ unsigned long kr_time;
+ int wait;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
+ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+ pdata->phy.autoneg == AUTONEG_ENABLE) {
+		/* AN must not be restarted while KR training is in
+		 * progress. The loop below waits up to 500ms for KR
+		 * training to complete; an AN restart is triggered
+		 * only if KR training fails.
+		 */
+ wait = XGBE_KR_TRAINING_WAIT_ITER;
+ while (wait--) {
+ kr_time = pdata->kr_start_time +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, kr_time))
+ break;
+			/* No AN restart is needed if the AN result is COMPLETE */
+ if (pdata->an_result == XGBE_AN_COMPLETE)
+ return;
+ usleep_range(10000, 11000);
+ }
+ }
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}
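
Assuming XGBE_AN_MS_TIMEOUT is 500 and XGBE_KR_TRAINING_WAIT_ITER is on the
order of 50 (both constants are defined elsewhere in the driver and not shown
here), the timing budget of the loop above works out as follows:

/*   deadline = kr_start_time + msecs_to_jiffies(500)
 *   per pass = usleep_range(10000, 11000)  ->  ~10-11 ms
 *   passes   = ~50, enough to cover the 500 ms window
 *
 * so an AN restart is deferred for at most about half a second after
 * KR training starts.
 */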
@@ -1288,7 +1241,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
return pdata->phy_if.phy_impl.an_outcome(pdata);
}
-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
enum xgbe_mode mode;
@@ -1301,6 +1254,9 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
mode = xgbe_phy_status_aneg(pdata);
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ pdata->phy.speed = SPEED_10;
+ break;
case XGBE_MODE_SGMII_100:
pdata->phy.speed = SPEED_100;
break;
@@ -1323,8 +1279,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
- if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+ if (!xgbe_set_mode(pdata, mode))
+ return false;
+
+ if (pdata->an_again)
xgbe_phy_reconfig_aneg(pdata);
+
+ return true;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1343,6 +1304,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+ /* bail out if the link status register read fails */
+ if (pdata->phy.link < 0)
+ return;
+
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;
@@ -1354,7 +1319,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
return;
}
- xgbe_phy_status_result(pdata);
+ if (xgbe_phy_status_result(pdata))
+ return;
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
@@ -1390,8 +1356,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
- if (pdata->dev_irq != pdata->an_irq)
+ if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ cancel_work_sync(&pdata->an_bh_work);
+ }
pdata->phy_if.phy_impl.stop(pdata);
@@ -1413,7 +1381,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+ INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
@@ -1441,6 +1409,8 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
xgbe_sgmii_1000_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_sgmii_100_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_sgmii_10_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
@@ -1538,6 +1508,8 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
return SPEED_1000;
else if (XGBE_ADV(lks, 100baseT_Full))
return SPEED_100;
+ else if (XGBE_ADV(lks, 10baseT_Full))
+ return SPEED_10;
return SPEED_UNKNOWN;
}
@@ -1583,6 +1555,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
+ pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 90cb55eb5466..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -139,7 +31,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = 1;
+ pdata->isr_as_bh_work = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -170,13 +62,13 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
goto out;
ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
- PCI_IRQ_LEGACY | PCI_IRQ_MSI);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
dev_info(pdata->dev, "single IRQ enablement failed\n");
return ret;
}
- pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,10 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+			/* Yellow Carp devices do not need the CDR workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+			/* Yellow Carp devices do not need an RRC (receiver reset cycle) */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -285,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+ address = pdata->smn_base + (pdata->xpcs_window_def_reg);
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+ pci_err(pdata->pcidev, "Failed to read data\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
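
The OFFSET field is stored in 64-byte units, hence the shift by 6. A worked
example (the register value is illustrative):

/* If WINDOW_DEF reads back with OFFSET == 0x30:
 *
 *   xpcs_window = 0x30 << 6 = 0xC00
 *
 * so indirect accesses through the window land at byte offset 0xC00 of
 * the mapped XPCS region, with xpcs_window_size taken from the SIZE
 * field of the same register.
 */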
@@ -418,6 +346,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
@@ -460,34 +391,54 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
-static const struct xgbe_version_data xgbe_v2a = {
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
+static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
-static const struct xgbe_version_data xgbe_v2b = {
+static struct xgbe_version_data xgbe_v2b = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
static const struct pci_device_id xgbe_pci_table[] = {
@@ -495,6 +446,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
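
At probe time the matched entry's driver_data is cast back to the
version-data pointer, which is how the new xgbe_v3 settings reach the rest of
the driver. A sketch of the usual pattern (consistent with how xgbe_pci_probe
consumes the table, though that line is not shown in this hunk):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xgbe_version_data *vdata;

	/* Recover the per-device configuration selected by the match */
	vdata = (struct xgbe_version_data *)id->driver_data;

	/* vdata->xpcs_access, vdata->enable_rrc, etc. then drive probe */
	return 0;
}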
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 18e48b3bc402..a68757e8fd22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -124,10 +15,11 @@
#include "xgbe.h"
#include "xgbe-common.h"
-#define XGBE_PHY_PORT_SPEED_100 BIT(0)
-#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
-#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
-#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10 BIT(0)
+#define XGBE_PHY_PORT_SPEED_100 BIT(1)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(4)
#define XGBE_MUTEX_RELEASE 0x80000000
@@ -189,6 +81,7 @@ enum xgbe_sfp_cable {
XGBE_SFP_CABLE_UNKNOWN = 0,
XGBE_SFP_CABLE_ACTIVE,
XGBE_SFP_CABLE_PASSIVE,
+ XGBE_SFP_CABLE_FIBER,
};
enum xgbe_sfp_base {
@@ -236,9 +129,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR 12
#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -284,6 +175,8 @@ struct xgbe_sfp_eeprom {
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -386,6 +279,10 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ enum xgbe_mb_cmd cmd,
+ enum xgbe_mb_subcmd sub_cmd);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
@@ -598,20 +495,27 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
return -ETIMEDOUT;
}
-static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_phy_mdio_mii_write_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, addr, reg, val);
+}
- return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+static int xgbe_phy_mdio_mii_write_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c45(pdata, addr, devad,
+ reg, val);
}
static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
@@ -636,7 +540,8 @@ static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
return ret;
}
-static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+static int xgbe_phy_mii_write_c22(struct mii_bus *mii, int addr, int reg,
+ u16 val)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -649,29 +554,58 @@ static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ ret = xgbe_phy_mdio_mii_write_c22(pdata, addr, reg, val);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
return ret;
}
-static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_phy_mii_write_c45(struct mii_bus *mii, int addr, int devad,
+ int reg, u16 val)
{
+ struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write_c45(pdata, addr, devad, reg, val);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
- return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c22(pdata, addr, reg);
+}
+
+static int xgbe_phy_mdio_mii_read_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c45(pdata, addr, devad, reg);
}
static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
@@ -696,7 +630,7 @@ static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
return ret;
}
-static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+static int xgbe_phy_mii_read_c22(struct mii_bus *mii, int addr, int reg)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -709,9 +643,32 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_read(pdata, reg);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ ret = xgbe_phy_mdio_mii_read_c22(pdata, addr, reg);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
+ int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
@@ -758,6 +715,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ XGBE_SET_SUP(lks, 10baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
XGBE_SET_SUP(lks, 100baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -823,25 +782,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
enum xgbe_sfp_speed sfp_speed)
{
- u8 *sfp_base, min, max;
+ u8 *sfp_base, min;
sfp_base = sfp_eeprom->base;
switch (sfp_speed) {
case XGBE_SFP_SPEED_1000:
min = XGBE_SFP_BASE_BR_1GBE_MIN;
- max = XGBE_SFP_BASE_BR_1GBE_MAX;
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
}
- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
- (sfp_base[XGBE_SFP_BASE_BR] <= max));
+ return sfp_base[XGBE_SFP_BASE_BR] >= min;
}
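
Editorial note: SFF-8472 stores the nominal bit rate in EEPROM byte 12 in
units of 100 Mb/s, which is why 0x0a (1 Gb/s) and 0x64 (10 Gb/s) serve as
the minimum thresholds above; with the MAX bounds dropped, any module
reporting at least the nominal rate now matches. A minimal decode helper,
for illustration only (not part of the patch):

static unsigned int xgbe_sfp_nominal_rate_mbps(const u8 *sfp_base)
{
	/* Byte 12 of the SFP EEPROM base area, per SFF-8472 */
	return sfp_base[XGBE_SFP_BASE_BR] * 100;
}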
static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -858,7 +814,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -880,14 +835,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -899,7 +847,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -963,13 +910,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -1048,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
return ret;
}
phy_data->phydev = phydev;
+ phy_data->phydev->mac_managed_pm = true;
xgbe_phy_external_phy_quirks(pdata);
@@ -1142,16 +1084,21 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
- /* Assume ACTIVE cable unless told it is PASSIVE */
+ /* Assume FIBER cable unless told otherwise */
if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
- } else {
+ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
}
/* Determine the type of SFP */
- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1114,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
@@ -1542,6 +1486,16 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
xgbe_phy_phydev_flowctrl(pdata);
switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_10:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 10baseT_Full);
+ mode = XGBE_MODE_SGMII_10;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 10baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
case XGBE_SGMII_AN_LINK_SPEED_100:
if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
XGBE_SET_LP_ADV(lks, 100baseT_Full);
@@ -1658,7 +1612,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1673,7 +1630,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
break;
default:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1803,6 +1763,9 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
if (phy_data->phydev &&
(phy_data->phydev->speed == SPEED_10000))
XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_2500))
+ XGBE_SET_ADV(dlks, 2500baseX_Full);
else
XGBE_SET_ADV(dlks, 1000baseKX_Full);
break;
@@ -1910,8 +1873,8 @@ static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
redrv_val = (u16)mode;
- return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
- redrv_reg, redrv_val);
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
}
static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
@@ -1956,6 +1919,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+#define MAX_RX_ADAPT_RETRIES 1
+#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
+ XGBE_PMA_RX_VALID_0_MASK)
+
+static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+
+ xgbe_phy_perform_ratechange(pdata,
+ mode == XGBE_MODE_KR ?
+ XGBE_MB_CMD_SET_10G_KR :
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* Step 2: Force the PCS to send an RX_ADAPT request to the PHY */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+ /* Step 3: Wait for RX_ADAPT ACK from the PHY */
+ msleep(200);
+
+ /* Software polls for coefficient update command (given by local PHY) */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+ /* Clear the RX_AD_REQ bit */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+ /* Check if coefficient update command is set */
+ if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+ goto set_mode;
+
+ /* Step 4: Check for Block lock */
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS) {
+ /* If the block lock is found, update the helpers
+ * and declare the link up
+ */
+ netif_dbg(pdata, link, pdata->netdev, "Block_lock done\n");
+ pdata->rx_adapt_done = true;
+ pdata->mode_set = false;
+ return;
+ }
+
+set_mode:
+ xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+rx_adapt_reinit:
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
+ XGBE_PMA_RX_VAL_SIG_MASK);
+
+ /* Step 1: Check for RX_VALID && LF_SIGDET */
+ if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "RX_VALID or LF_SIGDET is unset, issue rrc");
+ xgbe_phy_rrc(pdata);
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+ goto rx_adapt_reinit;
+ }
+
+ /* Perform RX adaptation */
+ xgbe_rx_adaptation(pdata);
+}
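
For orientation, a condensed sketch of the retry logic added above
(editorial summary, not part of the patch):

/*
 * xgbe_phy_rx_adaptation()
 *   Step 1: RX_VALID && LF_SIGDET set?  no -> issue RRC and retry,
 *           at most MAX_RX_ADAPT_RETRIES times, then give up
 *   xgbe_rx_adaptation()
 *     Step 2: request RX_ADAPT from the PHY
 *     Step 3: wait 200 ms, poll the coefficient-update status
 *     Step 4: PCS block lock found?  yes -> rx_adapt_done = true
 *             no -> re-issue the rate-change mailbox command via
 *                   xgbe_set_rx_adap_mode(), bounded by the same
 *                   retry budget
 */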
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -1977,12 +2027,30 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
+{
+ /* The PLL_CTRL feature is needed only for fixed PHY modes (autoneg disabled) */
+ if (pdata->phy.autoneg != AUTONEG_DISABLE)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
+ XGBE_PMA_PLL_CTRL_MASK,
+ enable ? XGBE_PMA_PLL_CTRL_ENABLE
+ : XGBE_PMA_PLL_CTRL_DISABLE);
+
+ /* Wait for command to complete */
+ usleep_range(100, 200);
+}
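
How this helper is used (editorial sketch of the flow in
xgbe_phy_perform_ratechange() below; not part of the patch):

/*
 * xgbe_phy_pll_ctrl(pdata, false);  // gate PLL re-init off
 * ...issue the firmware mailbox command and poll
 *    XP_DRIVER_INT_RO.STATUS for completion...
 * xgbe_phy_pll_ctrl(pdata, true);   // skipped for POWER_OFF/RRC and
 *                                   // while RX adaptation is pending
 */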
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
- unsigned int cmd, unsigned int sub_cmd)
+ enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd)
{
unsigned int s0 = 0;
unsigned int wait;
+ /* Disable PLL re-initialization during FW command processing */
+ xgbe_phy_pll_ctrl(pdata, false);
+
/* Log if a previous command did not complete */
if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
@@ -2003,7 +2071,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- return;
+ goto do_rx_adaptation;
usleep_range(1000, 2000);
}
@@ -2013,12 +2081,32 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+ goto reenable_pll;
+
+do_rx_adaptation:
+ if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+ (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "Enabling RX adaptation\n");
+ pdata->mode_set = true;
+ xgbe_phy_rx_adaptation(pdata);
+ /* return from here to avoid enabling PLL ctrl
+ * during adaptation phase
+ */
+ return;
+ }
+
+reenable_pll:
+ /* Re-enable PLL re-initialization; not needed for the PHY Power Off and RRC commands */
+ if (cmd != XGBE_MB_CMD_POWER_OFF &&
+ cmd != XGBE_MB_CMD_RRC)
+ xgbe_phy_pll_ctrl(pdata, true);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
/* Receiver Reset Cycle */
- xgbe_phy_perform_ratechange(pdata, 5, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE);
netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
}
@@ -2028,13 +2116,38 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
struct xgbe_phy_data *phy_data = pdata->phy_data;
/* Power off */
- xgbe_phy_perform_ratechange(pdata, 0, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_UNKNOWN;
netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* Rx-Adaptation is not supported on older platforms (ver < 0x30) */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30)
+ return false;
+
+ /* Re-driver models 4223 && 4227 do not support Rx-Adaptation */
+ if (phy_data->redrv &&
+ (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+ phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+ return false;
+
+ /* 10G KR mode with AN does not support Rx-Adaptation */
+ if (mode == XGBE_MODE_KR &&
+ phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+ return false;
+
+ pdata->en_rx_adap = 1;
+ return true;
+}
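
Editorial summary of the gates above (illustrative, not part of the patch):

/*
 * RX adaptation is used only when SNPSVER >= 0x30, no 4223/4227
 * re-driver sits in the signal path, and -- for KR -- the port runs
 * in the no-autoneg backplane mode; otherwise the legacy rate-change
 * flow is kept.
 */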
+
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2043,14 +2156,22 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
- xgbe_phy_perform_ratechange(pdata, 3, 0);
+ pdata->en_rx_adap = 0;
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+ } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
} else {
if (phy_data->sfp_cable_len <= 1)
- xgbe_phy_perform_ratechange(pdata, 3, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_1M);
else if (phy_data->sfp_cable_len <= 3)
- xgbe_phy_perform_ratechange(pdata, 3, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_3M);
else
- xgbe_phy_perform_ratechange(pdata, 3, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER);
}
phy_data->cur_mode = XGBE_MODE_SFI;
@@ -2065,7 +2186,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/X */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_X;
@@ -2079,7 +2200,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII);
phy_data->cur_mode = XGBE_MODE_SGMII_1000;
@@ -2093,13 +2214,27 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 100M/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS);
phy_data->cur_mode = XGBE_MODE_SGMII_100;
netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
}
+static void xgbe_phy_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_10MBITS);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_10;
+
+ netif_dbg(pdata, link, pdata->netdev, "10MbE SGMII mode set\n");
+}
+
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2107,7 +2242,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, 4, 0);
+ if (enable_rx_adap(pdata, XGBE_MODE_KR))
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_RX_ADAP);
+ else
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2121,7 +2261,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 2.5G/KX */
- xgbe_phy_perform_ratechange(pdata, 2, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KX_2500;
@@ -2135,7 +2275,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/KX */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_KX_1000;
@@ -2158,12 +2298,15 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
return xgbe_phy_cur_mode(pdata);
switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
return XGBE_MODE_KR;
+ case XGBE_MODE_KX_2500:
+ return XGBE_MODE_SGMII_1000;
case XGBE_MODE_KR:
default:
- return XGBE_MODE_SGMII_1000;
+ return XGBE_MODE_KX_2500;
}
}
@@ -2225,6 +2368,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2242,6 +2387,8 @@ static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2316,6 +2463,9 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_phy_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_phy_sgmii_100_mode(pdata);
break;
@@ -2372,6 +2522,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 100baseT_Full));
@@ -2401,6 +2554,11 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return false;
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_10:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
return false;
@@ -2493,15 +2651,23 @@ static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported on ver 0x21 and ver >= 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver == 0x21 || ver >= 0x30);
case SPEED_100:
case SPEED_1000:
return true;
case SPEED_2500:
- return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ return ((phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ||
+ (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T));
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -2509,10 +2675,18 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported on ver 0x21 and ver >= 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return ((ver == 0x21 || ver >= 0x30) &&
+ (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000));
case SPEED_100:
return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_1000:
@@ -2559,12 +2733,12 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
case XGBE_PORT_MODE_1000BASE_T:
case XGBE_PORT_MODE_NBASE_T:
case XGBE_PORT_MODE_10GBASE_T:
- return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_baset_mode(pdata, speed);
case XGBE_PORT_MODE_1000BASE_X:
case XGBE_PORT_MODE_10GBASE_R:
return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
case XGBE_PORT_MODE_SFP:
- return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_sfp_mode(pdata, speed);
default:
return false;
}
@@ -2573,8 +2747,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
- int ret;
+ int reg, ret;
*an_restart = 0;
@@ -2587,8 +2760,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+ if (pdata->en_rx_adap)
+ pdata->rx_adapt_done = false;
return 0;
+ }
}
if (phy_data->phydev) {
@@ -2605,12 +2781,41 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
+ if (reg < 0)
+ return reg;
+
+ /* Link status is latched low so that momentary link drops
+ * can be detected. If link was already down, read again
+ * to get the latest state.
+ */
+
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ return reg;
+ }
+
+ if (pdata->en_rx_adap) {
+ /* If the link is available and adaptation is done,
+ * declare the link up
+ */
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ /* If either the link is unavailable or adaptation is incomplete,
+ * retrigger the adaptation logic (if the mode is not yet set,
+ * issue the mailbox command first)
+ */
+ if (pdata->mode_set) {
+ xgbe_phy_rx_adaptation(pdata);
+ } else {
+ pdata->rx_adapt_done = false;
+ xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ }
+
+ if (pdata->rx_adapt_done)
+ return 1;
+ } else if (reg & MDIO_STAT1_LSTATUS)
return 1;
if (pdata->phy.autoneg == AUTONEG_ENABLE &&
@@ -2622,7 +2827,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
}
/* No link, attempt a receiver reset cycle */
- if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
@@ -2698,7 +2903,7 @@ static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int ret;
+ int ret;
ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
if (ret)
@@ -2835,6 +3040,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* 10 Mbps speed is supported on ver 0x21 and ver >= 0x30 */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if ((ver < 0x30 && ver != 0x21) && (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10))
+ return true;
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
@@ -2848,7 +3059,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_1000BASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
return false;
break;
@@ -2857,14 +3069,17 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_NBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
return false;
break;
case XGBE_PORT_MODE_10GBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
break;
@@ -2873,7 +3088,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_SFP:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
@@ -3242,6 +3458,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3272,6 +3492,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3294,6 +3518,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3302,6 +3530,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, 1000baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_1000;
}
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
XGBE_SET_SUP(lks, 10000baseT_Full);
phy_data->start_mode = XGBE_MODE_KR;
@@ -3334,6 +3566,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
XGBE_SET_SUP(lks, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
phy_data->start_mode = XGBE_MODE_SGMII_100;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -3388,8 +3622,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
mii->priv = pdata;
mii->name = "amd-xgbe-mii";
- mii->read = xgbe_phy_mii_read;
- mii->write = xgbe_phy_mii_write;
+ mii->read = xgbe_phy_mii_read_c22;
+ mii->write = xgbe_phy_mii_write_c22;
+ mii->read_c45 = xgbe_phy_mii_read_c45;
+ mii->write_c45 = xgbe_phy_mii_write_c45;
mii->parent = pdata->dev;
mii->phy_mask = ~0;
snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
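
With the split ops registered, Clause 45 accesses no longer encode the MMD
in the register number. A hedged usage sketch (assuming the standard phylib
accessors; not part of the patch):

	/* Reaches xgbe_phy_mii_read_c45() above */
	int val = mdiobus_c45_read(mii, addr, MDIO_MMD_PMAPMD, MDIO_STAT1);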
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4ebd2410185a..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -123,9 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
@@ -135,17 +24,6 @@
#include "xgbe-common.h"
#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[];
-
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(xgbe_acpi_match, pdata->dev);
-
- return id ? (struct xgbe_version_data *)id->driver_data : NULL;
-}
-
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
@@ -173,11 +51,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
return 0;
}
#else /* CONFIG_ACPI */
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
@@ -185,17 +58,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[];
-
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
- const struct of_device_id *id;
-
- id = of_match_device(xgbe_of_match, pdata->dev);
-
- return id ? (struct xgbe_version_data *)id->data : NULL;
-}
-
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
@@ -244,11 +106,6 @@ static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
return phy_pdev;
}
#else /* CONFIG_OF */
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
@@ -290,12 +147,6 @@ static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
return phy_pdev;
}
-static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
-{
- return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
- : xgbe_of_vdata(pdata);
-}
-
static int xgbe_platform_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
@@ -321,7 +172,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
pdata->use_acpi = dev->of_node ? 0 : 1;
/* Get the version data */
- pdata->vdata = xgbe_get_vdata(pdata);
+ pdata->vdata = (struct xgbe_version_data *)device_get_match_data(dev);
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
@@ -338,7 +189,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ phy_irqnum = platform_irq_count(pdev) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
@@ -348,7 +199,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
- dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ dma_irqend = platform_irq_count(pdev);
}
/* Obtain the mmio areas for the device */
@@ -512,7 +363,7 @@ err_alloc:
return ret;
}
-static int xgbe_platform_remove(struct platform_device *pdev)
+static void xgbe_platform_remove(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
@@ -521,8 +372,6 @@ static int xgbe_platform_remove(struct platform_device *pdev)
platform_device_put(pdata->phy_platdev);
xgbe_free_pdata(pdata);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -580,7 +429,6 @@ static const struct xgbe_version_data xgbe_v1 = {
.tx_tstamp_workaround = 1,
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
@@ -588,9 +436,7 @@ static const struct acpi_device_id xgbe_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
@@ -598,7 +444,6 @@ static const struct of_device_id xgbe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
@@ -606,12 +451,8 @@ static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
static struct platform_driver xgbe_driver = {
.driver = {
.name = XGBE_DRV_NAME,
-#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
-#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pps.c b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
new file mode 100644
index 000000000000..6d03ae7ab36f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static u32 get_pps_mask(unsigned int x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
+static u32 get_pps_cmd(unsigned int x, u32 val)
+{
+ return (val & GENMASK(3, 0)) << PPS_MINIDX(x);
+}
+
+static u32 get_target_mode_sel(unsigned int x, u32 val)
+{
+ return (val & GENMASK(1, 0)) << (PPS_MAXIDX(x) - 2);
+}
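
Editorial sketch of the MAC_PPSCR field layout these helpers assume
(PPS_MINIDX()/PPS_MAXIDX() bound each output's control field; names come
from the driver headers):

/*
 * get_pps_mask(x)          -> GENMASK(MAX, MIN): the whole per-output field
 * get_pps_cmd(x, v)        -> 4-bit command at bits [MIN+3:MIN]
 * get_target_mode_sel(x,v) -> 2-bit target mode at bits [MAX-1:MAX-2]
 */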
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata,
+ struct xgbe_pps_config *cfg, int index, bool on)
+{
+ unsigned int ppscr = 0;
+ unsigned int tnsec;
+ u64 period;
+
+ /* Check if target time register is busy */
+ tnsec = XGMAC_IOREAD(pdata, MAC_PPSx_TTNSR(index));
+ if (XGMAC_GET_BITS(tnsec, MAC_PPSx_TTNSR, TRGTBUSY0))
+ return -EBUSY;
+
+ ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);
+ ppscr &= ~get_pps_mask(index);
+
+ if (!on) {
+ /* Disable PPS output */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_STOP);
+ ppscr |= PPSEN0;
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+ }
+
+ /* Configure start time */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTSR(index), cfg->start.tv_sec);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTNSR(index), cfg->start.tv_nsec);
+
+ period = cfg->period.tv_sec * NSEC_PER_SEC + cfg->period.tv_nsec;
+ period = div_u64(period, XGBE_V2_TSTAMP_SSINC);
+
+ if (period < 4)
+ return -EINVAL;
+
+ /* Configure interval and pulse width (50% duty cycle) */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_INTERVAL(index), period - 1);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_WIDTH(index), (period >> 1) - 1);
+
+ /* Enable PPS with pulse train mode */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_START);
+ ppscr |= get_target_mode_sel(index, XGBE_PPSTARGET_PULSE);
+ ppscr |= PPSEN0;
+
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+}
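
A worked example of the period math above (assuming, for illustration only,
a 4 ns XGBE_V2_TSTAMP_SSINC -- the real constant lives in the driver
headers): a 1 Hz, 50% duty-cycle request gives 1,000,000,000 / 4 =
250,000,000 ticks, so INTERVAL is programmed with 249,999,999 and WIDTH
with 124,999,999.

	/* Hypothetical caller-side check mirroring xgbe_pps_config() */
	u64 ticks = div_u64(1000000000ULL, XGBE_V2_TSTAMP_SSINC);
	/* INTERVAL = ticks - 1; WIDTH = (ticks >> 1) - 1 */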
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index d06d260cf1e2..0e0b8ec3b504 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
@@ -122,43 +13,19 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
-static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 adjust;
- u32 addend, diff;
- unsigned int neg_adjust = 0;
-
- if (delta < 0) {
- neg_adjust = 1;
- delta = -delta;
- }
+ u64 addend;
- adjust = pdata->tstamp_addend;
- adjust *= delta;
- diff = div_u64(adjust, 1000000000UL);
-
- addend = (neg_adjust) ? pdata->tstamp_addend - diff :
- pdata->tstamp_addend + diff;
+ addend = adjust_by_scaled_ppm(pdata->tstamp_addend, scaled_ppm);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
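
Sketch of the adjfine arithmetic (adjust_by_scaled_ppm() is the PTP core
helper; scaled_ppm is parts-per-million with a 16-bit binary fraction):

	/* addend' = addend + addend * scaled_ppm / (10^6 * 2^16) */
	/* e.g. scaled_ppm = 65536 (+1 ppm) moves a 0x80000000 addend */
	/* up by ~2147 to 0x80000863 */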
@@ -170,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+ u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ /* Negative adjustment for the HW timer registers: the sub-second
+ * register rolls over at 10^9 in digital rollover mode (TSCTRLSSR
+ * set) and at 2^31 in binary rollover mode.
+ */
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
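+ /* Bit 31 of the nanoseconds update value is the add/subtract flag */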
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -188,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
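+ /* Bracket the HW clock read with system timestamps so the PTP core
+ * can service PTP_SYS_OFFSET_EXTENDED requests.
+ */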
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -205,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -221,24 +106,46 @@ static int xgbe_settime(struct ptp_clock_info *info,
static int xgbe_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
- return -EOPNOTSUPP;
+ struct xgbe_prv_data *pdata = container_of(info, struct xgbe_prv_data,
+ ptp_clock_info);
+ struct xgbe_pps_config *pps_cfg;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(pdata->dev, "rq->type %d on %d\n", request->type, on);
+
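+ /* Only periodic output (PEROUT) requests are supported */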
+ if (request->type != PTP_CLK_REQ_PEROUT)
+ return -EOPNOTSUPP;
+
+ pps_cfg = &pdata->pps[request->perout.index];
+
+ pps_cfg->start.tv_sec = request->perout.start.sec;
+ pps_cfg->start.tv_nsec = request->perout.start.nsec;
+ pps_cfg->period.tv_sec = request->perout.period.sec;
+ pps_cfg->period.tv_nsec = request->perout.period.nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ ret = xgbe_pps_config(pdata, pps_cfg, request->perout.index, on);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return ret;
}
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
info->owner = THIS_MODULE;
info->max_adj = pdata->ptpclk_rate;
- info->adjfreq = xgbe_adjfreq;
+ info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
+ info->n_per_out = pdata->hw_feat.pps_out_num;
+ info->n_ext_ts = pdata->hw_feat.aux_snap_num;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
@@ -249,23 +156,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
new file mode 100644
index 000000000000..55e5e467facd
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+#include <linux/crc32.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/selftests.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+#define XGBE_LOOPBACK_NONE 0
+#define XGBE_LOOPBACK_MAC 1
+#define XGBE_LOOPBACK_PHY 2
+
+struct xgbe_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct xgbe_prv_data *pdata);
+};
+
+static u8 xgbe_test_id;
+
+static int xgbe_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct net_test_priv *tdata = pt->af_packet_priv;
+ const unsigned char *dst = tdata->packet->dst;
+ const unsigned char *src = tdata->packet->src;
+ struct netsfhdr *hdr;
+ struct ethhdr *eh;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct iphdr *ih;
+ int eat;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
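+ /* If the frags exceed the available tail room, make sure we hold a
+ * private copy before skb_linearize() reshuffles the data.
+ */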
+ eat = (skb->tail + skb->data_len) - skb->end;
+ if (eat > 0 && skb_shared(skb)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ }
+
+ if (skb_linearize(skb))
+ goto out;
+
+ if (skb_headlen(skb) < (NET_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ eh = (struct ethhdr *)skb_mac_header(skb);
+ if (dst) {
+ if (!ether_addr_equal_unaligned(eh->h_dest, dst))
+ goto out;
+ }
+ if (src) {
+ if (!ether_addr_equal_unaligned(eh->h_source, src))
+ goto out;
+ }
+
+ ih = ip_hdr(skb);
+
+ if (tdata->packet->tcp) {
+ if (ih->protocol != IPPROTO_TCP)
+ goto out;
+
+ th = (struct tcphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (th->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)th + sizeof(*th));
+ } else {
+ if (ih->protocol != IPPROTO_UDP)
+ goto out;
+
+ uh = (struct udphdr *)((u8 *)ih + 4 * ih->ihl);
+ if (uh->dest != htons(tdata->packet->dport))
+ goto out;
+
+ hdr = (struct netsfhdr *)((u8 *)uh + sizeof(*uh));
+ }
+
+ if (hdr->magic != cpu_to_be64(NET_TEST_PKT_MAGIC))
+ goto out;
+ if (tdata->packet->id != hdr->id)
+ goto out;
+
+ tdata->ok = true;
+ complete(&tdata->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __xgbe_test_loopback(struct xgbe_prv_data *pdata,
+ struct net_packet_attrs *attr)
+{
+ struct net_test_priv *tdata;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tdata = kzalloc(sizeof(*tdata), GFP_KERNEL);
+ if (!tdata)
+ return -ENOMEM;
+
+ tdata->ok = false;
+ init_completion(&tdata->comp);
+
+ tdata->pt.type = htons(ETH_P_IP);
+ tdata->pt.func = xgbe_test_loopback_validate;
+ tdata->pt.dev = pdata->netdev;
+ tdata->pt.af_packet_priv = tdata;
+ tdata->packet = attr;
+
+ dev_add_pack(&tdata->pt);
+
+ skb = net_test_get_skb(pdata->netdev, xgbe_test_id, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ xgbe_test_id++;
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
+ if (ret)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = NET_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tdata->comp, attr->timeout);
+ ret = tdata->ok ? 0 : -ETIMEDOUT;
+
+ if (ret)
+ netdev_err(pdata->netdev, "Response timedout: ret %d\n", ret);
+cleanup:
+ dev_remove_pack(&tdata->pt);
+ kfree(tdata);
+ return ret;
+}
+
+static int xgbe_test_mac_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+
+ attr.dst = pdata->netdev->dev_addr;
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static int xgbe_test_phy_loopback(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int ret;
+
+ if (!pdata->netdev->phydev) {
+ netdev_err(pdata->netdev, "phydev not found: cannot start PHY loopback test\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = phy_loopback(pdata->netdev->phydev, true, 0);
+ if (ret)
+ return ret;
+
+ attr.dst = pdata->netdev->dev_addr;
+ ret = __xgbe_test_loopback(pdata, &attr);
+
+ phy_loopback(pdata->netdev->phydev, false, 0);
+ return ret;
+}
+
+static int xgbe_test_sph(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ unsigned long cnt_end, cnt_start;
+ int ret;
+
+ cnt_start = pdata->ext_stats.rx_split_header_packets;
+
+ if (!pdata->sph) {
+ netdev_err(pdata->netdev, "Split Header not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* UDP test */
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = false;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
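+ /* The looped-back packet must have bumped the split-header counter */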
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ /* TCP test */
+ cnt_start = cnt_end;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.tcp = true;
+
+ ret = __xgbe_test_loopback(pdata, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = pdata->ext_stats.rx_split_header_packets;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xgbe_test_jumbo(struct xgbe_prv_data *pdata)
+{
+ struct net_packet_attrs attr = {};
+ int size = pdata->rx_buf_size;
+
+ attr.dst = pdata->netdev->dev_addr;
+ attr.max_size = size - ETH_FCS_LEN;
+
+ return __xgbe_test_loopback(pdata, &attr);
+}
+
+static const struct xgbe_test xgbe_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = XGBE_LOOPBACK_MAC,
+ .fn = xgbe_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = XGBE_LOOPBACK_NONE,
+ .fn = xgbe_test_phy_loopback,
+ }, {
+ .name = "Split Header ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_sph,
+ }, {
+ .name = "Jumbo Frame ",
+ .lb = XGBE_LOOPBACK_PHY,
+ .fn = xgbe_test_jumbo,
+ },
+};
+
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(dev);
+ int count = xgbe_selftest_get_count(pdata);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ xgbe_test_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(pdata->netdev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!netif_carrier_ok(dev)) {
+ netdev_err(pdata->netdev,
+ "Invalid link, cannot execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ /* Wait for the queues to drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true, 0);
+ if (!ret)
+ break;
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ ret = xgbe_enable_mac_loopback(pdata);
+ break;
+ case XGBE_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /* The first tests are always the MAC / PHY loopback tests.
+ * If either of them is not supported, abort early.
+ */
+ if (ret) {
+ netdev_err(pdata->netdev, "Loopback not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = xgbe_selftests[i].fn(pdata);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (xgbe_selftests[i].lb) {
+ case XGBE_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false, 0);
+ if (!ret)
+ break;
+ fallthrough;
+ case XGBE_LOOPBACK_MAC:
+ xgbe_disable_mac_loopback(pdata);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < xgbe_selftest_get_count(pdata); i++)
+ ethtool_puts(&p, xgbe_selftests[i].name);
+}
+
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata)
+{
+ return ARRAY_SIZE(xgbe_selftests);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
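+/* Fallback stubs when AMD northbridge (SMN) support is not built in */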
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 607a2c90513b..03ef0f548483 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -151,7 +42,8 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
@@ -188,11 +80,13 @@
#define XGBE_IRQ_MODE_EDGE 0
#define XGBE_IRQ_MODE_LEVEL 1
+#define XGBE_ETH_FRAME_HDR (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGMAC_MIN_PACKET 60
#define XGMAC_STD_PACKET_MTU 1500
#define XGMAC_MAX_STD_PACKET 1518
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
+#define XGMAC_GIANT_PACKET_MTU 16368
#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
#define XGMAC_PFC_DATA_LEN 46
@@ -225,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on link speed */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -234,6 +136,15 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+
+/* Maximum number of PPS outputs and auxiliary snapshots supported */
+#define XGBE_MAX_PPS_OUT 4
+#define XGBE_MAX_AUX_SNAP 4
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -261,6 +172,7 @@
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
+#define XGMAC_MAX_COAL_TX_TICK 100000
#define XGMAC_MAX_DMA_RIWT 0xff
#define XGMAC_INIT_DMA_RX_USECS 30
@@ -289,12 +201,14 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60
@@ -344,6 +258,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device id */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+/* Generic low and high masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -416,7 +339,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
@@ -492,7 +415,7 @@ struct xgbe_ring {
* a DMA channel.
*/
struct xgbe_channel {
- char name[16];
+ char name[20];
/* Address of private data area for device */
struct xgbe_prv_data *pdata;
@@ -562,6 +485,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -593,6 +517,7 @@ enum xgbe_mode {
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_X,
+ XGBE_MODE_SGMII_10,
XGBE_MODE_SGMII_100,
XGBE_MODE_SGMII_1000,
XGBE_MODE_SFI,
@@ -610,6 +535,32 @@ enum xgbe_mdio_mode {
XGBE_MDIO_MODE_CL45,
};
+enum xgbe_mb_cmd {
+ XGBE_MB_CMD_POWER_OFF = 0,
+ XGBE_MB_CMD_SET_1G,
+ XGBE_MB_CMD_SET_2_5G,
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_CMD_RRC
+};
+
+enum xgbe_mb_subcmd {
+ XGBE_MB_SUBCMD_NONE = 0,
+ XGBE_MB_SUBCMD_RX_ADAP,
+
+ /* 10GbE SFP subcommands */
+ XGBE_MB_SUBCMD_ACTIVE = 0,
+ XGBE_MB_SUBCMD_PASSIVE_1M,
+ XGBE_MB_SUBCMD_PASSIVE_3M,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER,
+
+ /* 1GbE Mode subcommands */
+ XGBE_MB_SUBCMD_10MBITS = 0,
+ XGBE_MB_SUBCMD_100MBITS,
+ XGBE_MB_SUBCMD_1G_SGMII,
+ XGBE_MB_SUBCMD_1G_KX
+};
+
struct xgbe_phy {
struct ethtool_link_ksettings lks;
@@ -726,6 +677,11 @@ struct xgbe_ext_stats {
u64 rx_vxlan_csum_errors;
};
+struct xgbe_pps_config {
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -747,8 +703,11 @@ struct xgbe_hw_if {
int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
enum xgbe_mdio_mode);
- int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
- int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int);
+ int (*write_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int,
+ u16);
int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
@@ -805,14 +764,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -832,6 +783,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
@@ -1006,12 +961,14 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
unsigned int an_cdr_workaround;
+ unsigned int enable_rrc;
};
struct xgbe_prv_data {
@@ -1022,6 +979,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
@@ -1188,14 +1146,15 @@ struct xgbe_prv_data {
spinlock_t tstamp_lock;
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
- struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
+ struct kernel_hwtstamp_config tstamp_config;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
u64 tx_tstamp;
+ /* Pulse Per Second output */
+ struct xgbe_pps_config pps[XGBE_MAX_PPS_OUT];
+
/* DCB support */
struct ieee_ets *ets;
struct ieee_pfc *pfc;
@@ -1253,6 +1212,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
+ unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */
@@ -1263,11 +1223,11 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
- unsigned int isr_as_tasklet;
- struct tasklet_struct tasklet_dev;
- struct tasklet_struct tasklet_ecc;
- struct tasklet_struct tasklet_i2c;
- struct tasklet_struct tasklet_an;
+ unsigned int isr_as_bh_work;
+ struct work_struct dev_bh_work;
+ struct work_struct ecc_bh_work;
+ struct work_struct i2c_bh_work;
+ struct work_struct an_bh_work;
struct dentry *xgbe_debugfs;
@@ -1282,6 +1242,11 @@ struct xgbe_prv_data {
bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
+ bool en_rx_adap;
+ int rx_adapt_retries;
+ bool rx_adapt_done;
+ bool mode_set;
+ bool sph;
};
/* Function prototypes*/
@@ -1330,6 +1295,44 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int xgbe_set_hwtstamp_settings(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg,
+ int index, bool on);
+
+/* Selftest functions */
+void xgbe_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data);
+int xgbe_selftest_get_count(struct xgbe_prv_data *pdata);
+
+/* Loopback control */
+int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata);
+void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata);
+
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -672,7 +670,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
@@ -690,7 +688,7 @@ err:
return ret;
}
-static int xge_remove(struct platform_device *pdev)
+static void xge_remove(struct platform_device *pdev)
{
struct xge_pdata *pdata;
struct net_device *ndev;
@@ -706,8 +704,6 @@ static int xge_remove(struct platform_device *pdev)
xge_mdio_remove(ndev);
unregister_netdev(ndev);
free_netdev(ndev);
-
- return 0;
}
static void xge_shutdown(struct platform_device *pdev)
@@ -733,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
index b3985a7be59d..7be6f83e22fe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.h
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -22,6 +22,7 @@
#include <linux/of_mdio.h>
#include <linux/prefetch.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <net/ip.h>
#include "mac.h"
#include "enet.h"
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
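+ /* rgmii_clock() returns the standard RGMII clock for the PHY speed
+ * or a negative errno; fall back to the 1Gbps clock (125 MHz).
+ */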
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 220dc42af31a..3b2951030a38 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ /* strip off CRC as HW isn't doing this */
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
prefetch(skb->data - NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
}
- nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv) {
- /* strip off CRC as HW isn't doing this */
- datalen -= 4;
+ if (!nv)
goto skip_jumbo;
- }
slots = page_pool->slots - 1;
head = page_pool->head;
@@ -869,7 +871,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
for (i = 0; i < pdata->txq_cnt; i++) {
txq = netdev_get_tx_queue(ndev, i);
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
netif_tx_start_queue(txq);
}
}
@@ -1002,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
@@ -1526,7 +1530,7 @@ static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
xgene_enet_close(ndev);
- ndev->mtu = new_mtu;
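+ /* Paired with lockless READ_ONCE() readers of ndev->mtu */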
+ WRITE_ONCE(ndev->mtu, new_mtu);
pdata->mac_ops->set_framesize(pdata, frame_size);
xgene_enet_open(ndev);
@@ -1628,7 +1632,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
for (i = 0; i < max_irqs; i++) {
ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
+ if (ret < 0) {
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
max_irqs = i;
pdata->rxq_cnt = max_irqs / 2;
@@ -1636,7 +1640,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- return ret ? : -ENXIO;
+ return ret;
}
pdata->irqs[i] = ret;
}
@@ -1975,14 +1979,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
@@ -2016,7 +2018,6 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
void (*link_state)(struct work_struct *);
- const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
@@ -2037,19 +2038,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GRO |
NETIF_F_SG;
- of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
- if (of_id) {
- pdata->enet_id = (enum xgene_enet_id)of_id->data;
- }
-#ifdef CONFIG_ACPI
- else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
- if (acpi_id)
- pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
- }
-#endif
+ pdata->enet_id = (enum xgene_enet_id)device_get_match_data(&pdev->dev);
if (!pdata->enet_id) {
ret = -ENODEV;
goto err;
@@ -2125,7 +2114,7 @@ err:
return ret;
}
-static int xgene_enet_remove(struct platform_device *pdev)
+static void xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
struct net_device *ndev;
@@ -2147,8 +2136,6 @@ static int xgene_enet_remove(struct platform_device *pdev)
xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
-
- return 0;
}
static void xgene_enet_shutdown(struct platform_device *pdev)
@@ -2168,7 +2155,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .of_match_table = xgene_enet_of_match,
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 643f5e646740..bce2c19e3f22 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -15,9 +15,10 @@
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..b3bf8d6f88e8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,12 +20,10 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -462,7 +460,7 @@ static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
/* prolly should wait for dma to finish & turn off the chip */
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active) {
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->timeout_active = 0;
}
disable_irq(dev->irq);
@@ -547,7 +545,7 @@ static inline void bmac_set_timeout(struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
if (bp->timeout_active)
- del_timer(&bp->tx_timeout);
+ timer_delete(&bp->tx_timeout);
bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&bp->tx_timeout);
bp->timeout_active = 1;
@@ -756,7 +754,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
XXDEBUG(("bmac_txdma_intr\n"));
}
- /* del_timer(&bp->tx_timeout); */
+ /* timer_delete(&bp->tx_timeout); */
/* bp->timeout_active = 0; */
while (1) {
@@ -797,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -862,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
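+ /* The top 6 bits of the little-endian CRC select the hash bin */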
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -877,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
@@ -1237,6 +1180,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1228,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
@@ -1315,7 +1261,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
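+ /* Request the IRQ disabled (IRQF_NO_AUTOEN); open() re-enables it */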
+ ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev);
if (ret) {
printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
goto err_out_iounmap_rx;
@@ -1334,7 +1280,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Mask chip interrupts and disable chip, will be
* re-enabled on open()
*/
- disable_irq(dev->irq);
pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
if (register_netdev(dev) != 0) {
@@ -1465,7 +1410,7 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
static void bmac_tx_timeout(struct timer_list *t)
{
- struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct bmac_data *bp = timer_container_of(bp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
@@ -1508,7 +1453,7 @@ static void bmac_tx_timeout(struct timer_list *t)
i = bp->tx_empty;
++dev->stats.tx_errors;
if (i != bp->tx_fill) {
- dev_kfree_skb(bp->tx_bufs[i]);
+ dev_kfree_skb_irq(bp->tx_bufs[i]);
bp->tx_bufs[i] = NULL;
if (++i >= N_TX_RING) i = 0;
bp->tx_empty = i;
@@ -1589,7 +1534,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
}
#endif
-static int bmac_remove(struct macio_dev *mdev)
+static void bmac_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct bmac_data *bp = netdev_priv(dev);
@@ -1607,8 +1552,6 @@ static int bmac_remove(struct macio_dev *mdev)
macio_release_resources(mdev);
free_netdev(dev);
-
- return 0;
}
static const struct of_device_id bmac_match[] =
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..af26905e44e3 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -20,7 +20,6 @@
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>
@@ -90,7 +89,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +111,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +167,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -271,7 +272,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
return rc;
}
-static int mace_remove(struct macio_dev *mdev)
+static void mace_remove(struct macio_dev *mdev)
{
struct net_device *dev = macio_get_drvdata(mdev);
struct mace_data *mp;
@@ -295,8 +296,6 @@ static int mace_remove(struct macio_dev *mdev)
free_netdev(dev);
macio_release_resources(mdev);
-
- return 0;
}
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
@@ -369,11 +368,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +385,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
@@ -520,7 +523,7 @@ static inline void mace_set_timeout(struct net_device *dev)
struct mace_data *mp = netdev_priv(dev);
if (mp->timeout_active)
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
@@ -673,7 +676,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
i = mp->tx_empty;
while (in_8(&mb->pr) & XMTSV) {
- del_timer(&mp->tx_timeout);
+ timer_delete(&mp->tx_timeout);
mp->timeout_active = 0;
/*
* Clear any interrupt indication associated with this status
@@ -802,7 +805,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
static void mace_tx_timeout(struct timer_list *t)
{
- struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct mace_data *mp = timer_container_of(mp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
@@ -841,7 +844,7 @@ static void mace_tx_timeout(struct timer_list *t)
if (mp->tx_bad_runt) {
mp->tx_bad_runt = 0;
} else if (i != mp->tx_fill) {
- dev_kfree_skb(mp->tx_bufs[i]);
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
if (++i >= N_TX_RING)
i = 0;
mp->tx_empty = i;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 95d3061c61be..8989506e6248 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -77,7 +77,7 @@ struct mace_frame {
u8 pad4;
u32 pad5;
u32 pad6;
- u8 data[1];
+ DECLARE_FLEX_ARRAY(u8, data);
/* And frame continues.. */
};
@@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* Load a receive DMA channel with a base address and ring length
@@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
+ u8 macaddr[ETH_ALEN];
int err;
dev = alloc_etherdev(PRIV_BYTES);
@@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev)
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
checksum ^= v;
- dev->dev_addr[j] = v;
+ macaddr[j] = v;
}
+ eth_hw_addr_set(dev, macaddr);
for (; j < 8; ++j) {
checksum ^= bitrev8(addr[j<<4]);
}
@@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev)
* Load the address on a mace controller.
*/
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- mb->padr = dev->dev_addr[i] = p[i];
+ mb->padr = macaddr[i] = p[i];
+ eth_hw_addr_set(dev, macaddr);
if (mp->chipid != BROKEN_ADDRCHG_REV)
mb->iac = 0;
}
@@ -735,7 +739,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");
-static int mac_mace_device_remove(struct platform_device *pdev)
+static void mac_mace_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mace_data *mp = netdev_priv(dev);
@@ -751,8 +755,6 @@ static int mac_mace_device_remove(struct platform_device *pdev)
mp->tx_ring, mp->tx_ring_phys);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver mac_mace_driver = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8ebcc68e807f..f6a96931c89a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_AQTION) += atlantic.o
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
atlantic-objs := aq_main.o \
aq_nic.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..fc2b325f34e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -17,7 +17,7 @@
#define AQ_CFG_IS_POLLING_DEF 0U
-#define AQ_CFG_FORCE_LEGACY_INT 0U
+#define AQ_CFG_FORCE_INTX 0U
#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
#define AQ_CFG_INTERRUPT_MODERATION_ON 1
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
@@ -64,8 +65,6 @@
*/
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
-#define AQ_CFG_NAPI_WEIGHT 64U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 23b2d390fcdd..ace691d7cd75 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -40,10 +40,12 @@
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC113CA 0x34C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
+#define AQ_DEVICE_ID_AQC116C 0x11C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@@ -53,20 +55,19 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2G5 BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-#define AQ_NIC_RATE_10M BIT(6)
-#define AQ_NIC_RATE_1G_HALF BIT(7)
-#define AQ_NIC_RATE_100M_HALF BIT(8)
-#define AQ_NIC_RATE_10M_HALF BIT(9)
+#define AQ_NIC_RATE_2G5 BIT(2)
+#define AQ_NIC_RATE_1G BIT(3)
+#define AQ_NIC_RATE_100M BIT(4)
+#define AQ_NIC_RATE_10M BIT(5)
+#define AQ_NIC_RATE_1G_HALF BIT(6)
+#define AQ_NIC_RATE_100M_HALF BIT(7)
+#define AQ_NIC_RATE_10M_HALF BIT(8)
-#define AQ_NIC_RATE_EEE_10G BIT(10)
-#define AQ_NIC_RATE_EEE_5G BIT(11)
-#define AQ_NIC_RATE_EEE_2G5 BIT(12)
-#define AQ_NIC_RATE_EEE_1G BIT(13)
-#define AQ_NIC_RATE_EEE_100M BIT(14)
+#define AQ_NIC_RATE_EEE_10G BIT(9)
+#define AQ_NIC_RATE_EEE_5G BIT(10)
+#define AQ_NIC_RATE_EEE_2G5 BIT(11)
+#define AQ_NIC_RATE_EEE_1G BIT(12)
+#define AQ_NIC_RATE_EEE_100M BIT(13)
#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
AQ_NIC_RATE_EEE_5G |\
AQ_NIC_RATE_EEE_2G5 |\
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index d3526cd38f3d..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
-static const struct hwmon_channel_info *aq_hwmon_info[] = {
- &aq_hwmon_temp,
+static const struct hwmon_channel_info * const aq_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a9ef0544e30f..6fef47ba0a59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -13,7 +13,10 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_macsec.h"
+#include "aq_main.h"
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
@@ -97,6 +100,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
@@ -229,7 +241,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
@@ -255,7 +267,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
- int tc;
+ unsigned int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
@@ -264,22 +276,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
- snprintf(tc_string, 8, "TC%d ", tc);
+ snprintf(tc_string, 8, "TC%u ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
}
}
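
ethtool_sprintf() folds the snprintf() plus manual "p += ETH_GSTRING_LEN" pair into one call: it formats into the current string slot and advances the caller's cursor by the fixed slot width. A paraphrase of its effect (not the kernel's implementation):

    /* What ethtool_sprintf(&p, fmt, ...) does for the caller, sketched: */
    void ethtool_sprintf_sketch(u8 **data, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vsnprintf((char *)*data, ETH_GSTRING_LEN, fmt, args);
            va_end(args);

            *data += ETH_GSTRING_LEN;  /* fixed-width slot, always advance */
    }
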
@@ -294,20 +304,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -327,9 +335,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsc_stat_names[si], i);
- p += ETH_GSTRING_LEN;
}
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
@@ -338,10 +345,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -358,10 +364,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_rxsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -437,8 +442,8 @@ static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
return sizeof(cfg->aq_rss.hash_secret_key);
}
-static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int aq_ethtool_get_rss(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -446,21 +451,21 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
cfg = aq_nic_get_cfg(aq_nic);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (rxfh->indir) {
for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
- indir[i] = cfg->aq_rss.indirection_table[i];
+ rxfh->indir[i] = cfg->aq_rss.indirection_table[i];
}
- if (key)
- memcpy(key, cfg->aq_rss.hash_secret_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
return 0;
}
-static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int aq_ethtool_set_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(netdev);
struct aq_nic_cfg_s *cfg;
@@ -472,16 +477,17 @@ static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
rss_entries = cfg->aq_rss.indirection_table_size;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < rss_entries; i++)
- cfg->aq_rss.indirection_table[i] = indir[i];
+ cfg->aq_rss.indirection_table[i] = rxfh->indir[i];
/* Fill out the rss hash key */
- if (key) {
- memcpy(cfg->aq_rss.hash_secret_key, key,
+ if (rxfh->key) {
+ memcpy(cfg->aq_rss.hash_secret_key, rxfh->key,
sizeof(cfg->aq_rss.hash_secret_key));
err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
&cfg->aq_rss);
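
The get/set_rxfh conversion bundles the previously separate indir/key/hfunc arguments into struct ethtool_rxfh_param (set also gains an extack). The NULL checks survive because the core still passes NULL pointers for components the user did not request. Kernel-context sketch of the new callback shape, driver details elided:

    static int my_get_rxfh(struct net_device *ndev,
                           struct ethtool_rxfh_param *rxfh)
    {
            rxfh->hfunc = ETH_RSS_HASH_TOP;  /* report Toeplitz */
            if (rxfh->indir) {               /* NULL == not requested */
                    /* fill rxfh->indir[] from driver state */
            }
            if (rxfh->key) {
                    /* copy the hash key into rxfh->key */
            }
            return 0;
    }
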
@@ -640,7 +646,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
}
static int aq_ethtool_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -670,23 +676,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
return 0;
}
-static u32 eee_mask_to_ethtool_mask(u32 speed)
+static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed)
{
- u32 rate = 0;
-
if (speed & AQ_NIC_RATE_EEE_10G)
- rate |= SUPPORTED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_1G)
- rate |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_100M)
- rate |= SUPPORTED_100baseT_Full;
-
- return rate;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
}
-static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
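
ethtool_keee replaces ethtool_eee for these callbacks: supported/advertised/lp_advertised become full link-mode bitmaps (arrays of unsigned long) instead of u32 SUPPORTED_* masks, hence the switch to linkmode_set_bit()/linkmode_copy()/linkmode_empty(). A kernel-context sketch of the bitmap idiom:

    static void linkmode_demo(void)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);  /* DECLARE_BITMAP */

            linkmode_zero(modes);
            linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, modes);
            if (!linkmode_empty(modes)) {
                    /* at least one mode advertised */
            }
    }
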
@@ -702,14 +704,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (err < 0)
return err;
- eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+ eee_mask_to_ethtool_mask(eee->supported, supported_rates);
if (aq_nic->aq_nic_cfg.eee_speeds)
- eee->advertised = eee->supported;
+ linkmode_copy(eee->advertised, eee->supported);
- eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+ eee_mask_to_ethtool_mask(eee->lp_advertised, rate);
- eee->eee_enabled = !!eee->advertised;
+ eee->eee_enabled = !linkmode_empty(eee->advertised);
eee->tx_lpi_enabled = eee->eee_enabled;
if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
@@ -718,7 +720,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
return 0;
}
-static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
@@ -812,7 +814,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
}
static void aq_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -827,7 +831,9 @@ static void aq_get_ringparam(struct net_device *ndev,
}
static int aq_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
const struct aq_hw_caps_s *hw_caps;
@@ -845,7 +851,7 @@ static int aq_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
ndev_running = true;
- dev_close(ndev);
+ aq_ndev_close(ndev);
}
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
@@ -861,7 +867,7 @@ static int aq_set_ringparam(struct net_device *ndev,
goto err_exit;
if (ndev_running)
- err = dev_open(ndev, NULL);
+ err = aq_ndev_open(ndev);
err_exit:
return err;
@@ -972,6 +978,76 @@ static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
return err;
}
+static int aq_ethtool_get_module_info(struct net_device *ndev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u8 compliance_val, dom_type;
+ int err;
+
+ /* Module EEPROM is only supported for controllers with external PHY */
+ if (aq_nic->aq_nic_cfg.aq_hw_caps->media_type != AQ_HW_MEDIA_TYPE_FIBRE ||
+ !aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_COMP_ADDR, 1, &compliance_val);
+ if (err)
+ return err;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, SFF_8472_DOM_TYPE_ADDR, 1, &dom_type);
+ if (err)
+ return err;
+
+ if (dom_type & SFF_8472_ADDRESS_CHANGE_REQ_MASK || compliance_val == 0x00) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ return 0;
+}
+
+static int aq_ethtool_get_module_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, unsigned char *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ unsigned int first, last, len;
+ int err;
+
+ if (!aq_nic->aq_hw_ops->hw_read_module_eeprom)
+ return -EOPNOTSUPP;
+
+ first = ee->offset;
+ last = ee->offset + ee->len;
+
+ if (first < ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8079_LEN);
+ len -= first;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_ID_ADDR, first, len, data);
+ if (err)
+ return err;
+
+ first += len;
+ data += len;
+ }
+ if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
+ len = min(last, ETH_MODULE_SFF_8472_LEN);
+ len -= first;
+ first -= ETH_MODULE_SFF_8079_LEN;
+
+ err = aq_nic->aq_hw_ops->hw_read_module_eeprom(aq_nic->aq_hw,
+ SFF_8472_DIAGNOSTICS_ADDR, first, len, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
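
The EEPROM reader treats the module as two 256-byte windows: bytes 0..255 at I2C address 0x50 (SFF_8472_ID_ADDR) and bytes 256..511 at 0x51 (the diagnostics page), splitting any request that straddles the boundary. A standalone illustration of that offset arithmetic, using the same min()-then-subtract steps as the function above:

    #include <stdio.h>

    #define WIN 256u  /* ETH_MODULE_SFF_8079_LEN; 2*WIN is the 8472 length */

    int main(void)
    {
            unsigned int first = 200, last = first + 100; /* crosses the seam */

            if (first < WIN) {
                    unsigned int len = (last < WIN ? last : WIN) - first;

                    printf("dev 0x50, reg %u, len %u\n", first, len);
                    first += len;
            }
            if (first < 2 * WIN && last > WIN)
                    printf("dev 0x51, reg %u, len %u\n", first - WIN,
                           (last < 2 * WIN ? last : 2 * WIN) - first);
            return 0;
    }
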
@@ -1009,4 +1085,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_ts_info = aq_ethtool_get_ts_info,
.get_phy_tunable = aq_ethtool_get_phy_tunable,
.set_phy_tunable = aq_ethtool_set_phy_tunable,
+ .get_module_info = aq_ethtool_get_module_info,
+ .get_module_eeprom = aq_ethtool_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
index 6d5be5ebeb13..f26fe1a75539 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -14,4 +14,12 @@
extern const struct ethtool_ops aq_ethtool_ops;
#define AQ_PRIV_FLAGS_MASK (AQ_HW_LOOPBACK_MASK)
+#define SFF_8472_ID_ADDR 0x50
+#define SFF_8472_DIAGNOSTICS_ADDR 0x51
+
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_DOM_TYPE_ADDR 0x5c
+
+#define SFF_8472_ADDRESS_CHANGE_REQ_MASK 0x4
+
#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
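
bitmap_weight() collapses the hand-rolled hweight_long() loop and, unlike the loop, would also mask off a partial final word (moot here, since VLAN_N_VID is 4096, a multiple of BITS_PER_LONG). A standalone illustration of the same computation:

    #include <stdio.h>

    #define NBITS 4096u
    #define BPL   (8 * sizeof(unsigned long))
    #define NLONGS ((NBITS + BPL - 1) / BPL)

    int main(void)
    {
            unsigned long vlans[NLONGS] = { 0x5UL, 0x1UL }; /* 3 bits set */
            unsigned int i, weight = 0;

            for (i = 0; i < NLONGS; i++)
                    weight += (unsigned int)__builtin_popcountl(vlans[i]);
            printf("active VLANs: %u\n", weight);  /* GCC/Clang builtin */
            return 0;
    }
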
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 062a300a566a..4e66fd9b2ab1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
};
struct aq_stats_s {
+ u64 brc;
+ u64 btc;
u64 uprc;
u64 mprc;
u64 bprc;
@@ -102,7 +104,7 @@ struct aq_stats_s {
};
#define AQ_HW_IRQ_INVALID 0U
-#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_INTX 1U
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
@@ -111,6 +113,8 @@ struct aq_stats_s {
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
+#define AQ_FW_WAKE_ON_LINK_RTPM BIT(10)
+
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
@@ -338,6 +342,9 @@ struct aq_hw_ops {
int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
+
+ int (*hw_read_module_eeprom)(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data);
};
struct aq_fw_ops {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 1921741f7311..18b08277d2e1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
+{
+ int err;
+ u32 val;
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
+
+ err = aq_hw_err_from_flags(hw);
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ hw, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
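
readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) busy-polls op(addr) into val until cond holds or the timeout elapses, returning 0 or -ETIMEDOUT. Worth noting in the new helper above: that return value is discarded, so err only ever reflects the earlier aq_hw_err_from_flags() check. Sketch of the same call with the status captured, for contrast:

    /* Kernel-context sketch; the hunk above drops this return value. */
    u32 val;
    int err;

    err = readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
                                    hw, val,        /* val = op(hw) each pass */
                                    val == 1,       /* stop condition */
                                    1000U, 10000U); /* 1 ms step, 10 ms cap */
    if (err)
            return err;  /* -ETIMEDOUT: init-done bit never set */
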
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index ffa6e4067c21..d89c63d88e4a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..6afff8af5e86 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -289,12 +289,9 @@ static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
static int aq_mdo_dev_open(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -303,12 +300,9 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
static int aq_mdo_dev_stop(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -445,7 +439,7 @@ static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
static int aq_mdo_add_secy(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const struct macsec_secy *secy = ctx->secy;
enum aq_macsec_sc_sa sc_sa;
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -483,7 +474,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
static int aq_mdo_upd_secy(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
const struct macsec_secy *secy = ctx->secy;
int txsc_idx;
int ret = 0;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -540,12 +528,9 @@ static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
static int aq_mdo_del_secy(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -585,12 +570,13 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ memzero_explicit(&key_rec, sizeof(key_rec));
return ret;
}
static int aq_mdo_add_txsa(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const struct macsec_secy *secy = ctx->secy;
struct aq_macsec_txsc *aq_txsc;
@@ -601,9 +587,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -620,7 +603,7 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
static int aq_mdo_upd_txsa(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const struct macsec_secy *secy = ctx->secy;
struct aq_macsec_txsc *aq_txsc;
@@ -631,9 +614,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -672,7 +652,7 @@ static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
static int aq_mdo_del_txsa(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
int txsc_idx;
int ret = 0;
@@ -681,9 +661,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -767,7 +744,7 @@ static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
static int aq_mdo_add_rxsc(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
u32 rxsc_idx;
@@ -780,9 +757,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -801,7 +775,7 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
int rxsc_idx;
int ret = 0;
@@ -809,9 +783,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -867,7 +838,7 @@ static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
static int aq_mdo_del_rxsc(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
enum aq_clear_type clear_type = AQ_CLEAR_SW;
int rxsc_idx;
int ret = 0;
@@ -876,9 +847,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -932,13 +900,14 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
return ret;
}
static int aq_mdo_add_rxsa(struct macsec_context *ctx)
{
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
const struct macsec_secy *secy = ctx->secy;
struct aq_macsec_rxsc *aq_rxsc;
int rxsc_idx;
@@ -948,9 +917,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -967,8 +933,8 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
{
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
const struct macsec_secy *secy = ctx->secy;
int rxsc_idx;
@@ -978,9 +944,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1019,8 +982,8 @@ static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
static int aq_mdo_del_rxsa(struct macsec_context *ctx)
{
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
int rxsc_idx;
int ret = 0;
@@ -1029,9 +992,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1040,13 +1000,10 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1063,7 +1020,7 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_tx_sc_stats *stats;
struct aq_hw_s *hw = nic->aq_hw;
struct aq_macsec_txsc *aq_txsc;
@@ -1073,9 +1030,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1090,7 +1044,7 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
struct aq_macsec_tx_sa_stats *stats;
struct aq_hw_s *hw = nic->aq_hw;
@@ -1106,9 +1060,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1133,7 +1084,7 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
struct aq_macsec_rx_sa_stats *stats;
struct aq_hw_s *hw = nic->aq_hw;
@@ -1147,9 +1098,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1181,7 +1129,7 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
- struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
struct aq_macsec_cfg *cfg = nic->macsec_cfg;
struct aq_macsec_rx_sa_stats *stats;
struct aq_hw_s *hw = nic->aq_hw;
@@ -1196,9 +1144,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
@@ -1451,26 +1396,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
egress_sa_threshold_expired);
}
+#define AQ_LOCKED_MDO_DEF(mdo) \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
+{ \
+ struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev); \
+ int ret; \
+ mutex_lock(&nic->macsec_mutex); \
+ ret = aq_mdo_##mdo(ctx); \
+ mutex_unlock(&nic->macsec_mutex); \
+ return ret; \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
const struct macsec_ops aq_macsec_ops = {
- .mdo_dev_open = aq_mdo_dev_open,
- .mdo_dev_stop = aq_mdo_dev_stop,
- .mdo_add_secy = aq_mdo_add_secy,
- .mdo_upd_secy = aq_mdo_upd_secy,
- .mdo_del_secy = aq_mdo_del_secy,
- .mdo_add_rxsc = aq_mdo_add_rxsc,
- .mdo_upd_rxsc = aq_mdo_upd_rxsc,
- .mdo_del_rxsc = aq_mdo_del_rxsc,
- .mdo_add_rxsa = aq_mdo_add_rxsa,
- .mdo_upd_rxsa = aq_mdo_upd_rxsa,
- .mdo_del_rxsa = aq_mdo_del_rxsa,
- .mdo_add_txsa = aq_mdo_add_txsa,
- .mdo_upd_txsa = aq_mdo_upd_txsa,
- .mdo_del_txsa = aq_mdo_del_txsa,
- .mdo_get_dev_stats = aq_mdo_get_dev_stats,
- .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
- .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
- .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
- .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+ .mdo_dev_open = aq_locked_mdo_dev_open,
+ .mdo_dev_stop = aq_locked_mdo_dev_stop,
+ .mdo_add_secy = aq_locked_mdo_add_secy,
+ .mdo_upd_secy = aq_locked_mdo_upd_secy,
+ .mdo_del_secy = aq_locked_mdo_del_secy,
+ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+ .mdo_add_txsa = aq_locked_mdo_add_txsa,
+ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+ .mdo_del_txsa = aq_locked_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
};
int aq_macsec_init(struct aq_nic_s *nic)
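
AQ_LOCKED_MDO_DEF generates a locked wrapper per offload callback so every entry point into macsec_cfg serializes on the new nic->macsec_mutex. Written out for one op, the macro above expands to exactly:

    static int aq_locked_mdo_dev_open(struct macsec_context *ctx)
    {
            struct aq_nic_s *nic = macsec_netdev_priv(ctx->netdev);
            int ret;

            mutex_lock(&nic->macsec_mutex);
            ret = aq_mdo_dev_open(ctx);
            mutex_unlock(&nic->macsec_mutex);
            return ret;
    }
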
@@ -1492,6 +1468,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
nic->ndev->features |= NETIF_F_HW_MACSEC;
nic->ndev->macsec_ops = &aq_macsec_ops;
+ mutex_init(&nic->macsec_mutex);
return 0;
}
@@ -1515,7 +1492,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
if (!nic->macsec_cfg)
return 0;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
if (nic->aq_fw_ops->send_macsec_req) {
struct macsec_cfg_request cfg = { 0 };
@@ -1564,7 +1541,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
ret = aq_apply_macsec_cfg(nic);
unlock:
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
return ret;
}
@@ -1576,9 +1553,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
if (!netif_carrier_ok(nic->ndev))
return;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
aq_check_txsa_expiration(nic);
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
}
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1589,21 +1566,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->rxsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
+ int cnt;
+
if (!nic->macsec_cfg)
return 0;
- return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_lock(&nic->macsec_mutex);
+ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_unlock(&nic->macsec_mutex);
+
+ return cnt;
}
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1614,12 +1600,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->txsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
@@ -1691,6 +1680,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
if (!cfg)
return data;
+ mutex_lock(&nic->macsec_mutex);
+
aq_macsec_update_stats(nic);
common_stats = &cfg->stats;
@@ -1773,5 +1764,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
data += i;
+ mutex_unlock(&nic->macsec_mutex);
+
return data;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e22935ce9573..4ef4fe64b8ac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,23 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -53,7 +59,7 @@ struct net_device *aq_ndev_alloc(void)
return ndev;
}
-static int aq_ndev_open(struct net_device *ndev)
+int aq_ndev_open(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
@@ -83,17 +89,14 @@ err_exit:
return err;
}
-static int aq_ndev_close(struct net_device *ndev)
+int aq_ndev_close(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
@@ -120,20 +123,29 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err_exit:
return err;
@@ -204,6 +216,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ aq_nic->xdp_prog && features & NETIF_F_LRO) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -227,12 +258,14 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
(void)aq_nic_set_multicast_list(aq_nic, ndev);
}
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
- struct hwtstamp_config *config)
+static int aq_ndev_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- if (config->flags)
- return -EINVAL;
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+
+ if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp)
+ return -EOPNOTSUPP;
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -262,59 +295,17 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
}
-#endif
-static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
+static int aq_ndev_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- int ret_val;
-#endif
-
- if (!aq_nic->aq_ptp)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
- if (ret_val)
- return ret_val;
-#endif
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
-static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
if (!aq_nic->aq_ptp)
return -EOPNOTSUPP;
- aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-#endif
-
-static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct aq_nic_s *aq_nic = netdev_priv(netdev);
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return aq_ndev_hwtstamp_set(aq_nic, ifr);
-
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
- case SIOCGHWTSTAMP:
- return aq_ndev_hwtstamp_get(aq_nic, ifr);
-#endif
- }
-
- return -EOPNOTSUPP;
+ aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config);
+ return 0;
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
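
Replacing .ndo_eth_ioctl with .ndo_hwtstamp_get/.ndo_hwtstamp_set removes all copy_from_user()/copy_to_user() from the driver: the core now handles the SIOC{G,S}HWTSTAMP user copies and hands drivers a kernel_hwtstamp_config directly. A kernel-context sketch of the minimal handler pair (priv_of() is a hypothetical accessor):

    static int my_hwtstamp_get(struct net_device *ndev,
                               struct kernel_hwtstamp_config *cfg)
    {
            *cfg = priv_of(ndev)->hwtstamp_config;
            return 0;
    }

    static int my_hwtstamp_set(struct net_device *ndev,
                               struct kernel_hwtstamp_config *cfg,
                               struct netlink_ext_ack *extack)
    {
            if (cfg->tx_type != HWTSTAMP_TX_OFF &&
                cfg->tx_type != HWTSTAMP_TX_ON)
                    return -ERANGE;
            priv_of(ndev)->hwtstamp_config = *cfg;
            return 0;
    }
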
@@ -413,6 +404,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -421,10 +462,14 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
- .ndo_eth_ioctl = aq_ndev_ioctl,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
+ .ndo_hwtstamp_get = aq_ndev_hwtstamp_get,
+ .ndo_hwtstamp_set = aq_ndev_hwtstamp_set,
};
static int __init aq_ndev_init_module(void)
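
aq_xdp_locking_key is a static key, bumped on the first program attach and dropped on the last detach, so the branch it guards costs nothing while XDP is unused. The consumer is outside this hunk; presumably the ring TX path takes a lock only while redirected frames can race onto a queue from multiple CPUs. Sketch of that assumed datapath side:

    /* Assumption, not code from this patch: how the key is likely read. */
    if (static_branch_unlikely(&aq_xdp_locking_key))
            spin_lock(&ring->xdp_tx_lock);   /* lock name hypothetical */
    /* ... post TX descriptors ... */
    if (static_branch_unlikely(&aq_xdp_locking_key))
            spin_unlock(&ring->xdp_tx_lock);
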
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..a78c1a168d8e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,7 +12,11 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
+int aq_ndev_open(struct net_device *ndev);
+int aq_ndev_close(struct net_device *ndev);
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1acf544afeb4..b24eaa5283fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -127,7 +127,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->irq_type = aq_pci_func_get_irq_type(self);
- if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ if ((cfg->irq_type == AQ_HW_IRQ_INTX) ||
(cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
@@ -254,7 +254,7 @@ static void aq_nic_service_task(struct work_struct *work)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, service_timer);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
@@ -264,13 +264,11 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
+ struct aq_nic_s *self = timer_container_of(self, t, polling_timer);
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
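
timer_container_of() is the successor to from_timer(); both recover the enclosing object from the timer_list pointer the callback receives, i.e. plain container_of() arithmetic. A standalone illustration:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int pad; };
    struct nic { int id; struct timer service_timer; };

    int main(void)
    {
            struct nic n = { .id = 7 };
            struct timer *t = &n.service_timer;  /* what the callback gets */
            struct nic *self = container_of(t, struct nic, service_timer);

            printf("%d\n", self->id);            /* prints 7 */
            return 0;
    }
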
@@ -316,18 +314,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
aq_macsec_init(self);
#endif
- mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
- mutex_unlock(&self->fwreq_mutex);
- if (err)
- goto err_exit;
+ if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+ // If DT has none or an invalid one, ask device for MAC address
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+ mutex_unlock(&self->fwreq_mutex);
- eth_hw_addr_set(self->ndev, addr);
+ if (err)
+ goto err_exit;
- if (!is_valid_ether_addr(self->ndev->dev_addr) ||
- !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
- netdev_warn(self->ndev, "MAC is invalid, will use random.");
- eth_hw_addr_random(self->ndev);
+ if (is_valid_ether_addr(addr) &&
+ aq_nic_is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(self->ndev, addr);
+ } else {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
}
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -382,6 +384,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -482,8 +489,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -513,8 +520,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
@@ -565,6 +572,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -693,6 +797,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -721,6 +826,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
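
aq_nic_xmit_xdpf() above serializes on the stack's own per-queue lock because ndo_xdp_xmit and XDP_TX can push frames onto a TX ring from any CPU, concurrently with the regular xmit path; NETDEV_TX_BUSY is returned when the queue is stopped or mapping fails, so the XDP core can account the drop. The idiom, sketched:

    /* Kernel-context sketch of the locking pattern used above. */
    struct netdev_queue *nq = netdev_get_tx_queue(ndev, ring_idx);

    __netif_tx_lock(nq, smp_processor_id());
    if (!netif_tx_queue_stopped(nq)) {
            /* map the xdp_frame and post descriptors */
    }
    __netif_tx_unlock(nq);
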
@@ -755,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
@@ -874,7 +1019,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -905,8 +1049,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data[++i] = stats->mbtc;
data[++i] = stats->bbrc;
data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
data[++i] = stats->dma_pkt_rc;
data[++i] = stats->dma_pkt_tc;
data[++i] = stats->dma_oct_rc;
@@ -918,11 +1068,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1236,27 +1386,25 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
netif_carrier_off(self->ndev);
- del_timer_sync(&self->service_timer);
+ timer_delete_sync(&self->service_timer);
cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
if (self->aq_nic_cfg.is_polling)
- del_timer_sync(&self->polling_timer);
+ timer_delete_sync(&self->polling_timer);
else
aq_pci_func_free_irqs(self);
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
@@ -1295,7 +1443,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..ad33f8586532 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -154,6 +157,8 @@ struct aq_nic_s {
struct mutex fwreq_mutex;
#if IS_ENABLED(CONFIG_MACSEC)
struct aq_macsec_cfg *macsec_cfg;
+ /* mutex to protect data in macsec_cfg */
+ struct mutex macsec_mutex;
#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
@@ -177,6 +182,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index d4b1976ee69b..ed5231dece3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
{}
};
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
- { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+ { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -157,8 +162,8 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
self->msix_entry_mask |= (1 << i);
if (pdev->msix_enabled && affinity_mask)
- irq_set_affinity_hint(pci_irq_vector(pdev, i),
- affinity_mask);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i),
+ affinity_mask);
}
return err;
@@ -182,7 +187,7 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
continue;
if (pdev->msix_enabled)
- irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ irq_update_affinity_hint(pci_irq_vector(pdev, i), NULL);
free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
@@ -195,7 +200,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
- return AQ_HW_IRQ_LEGACY;
+ return AQ_HW_IRQ_INTX;
}
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
@@ -293,11 +298,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
- PCI_IRQ_MSIX | PCI_IRQ_MSI |
- PCI_IRQ_LEGACY);
-
+#if !AQ_CFG_FORCE_INTX
+ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_ALL_TYPES);
if (err < 0)
goto err_hwinit;
numvecs = err;
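
PCI_IRQ_ALL_TYPES folds the explicit MSIX|MSI|legacy mask into one flag; pci_alloc_irq_vectors() still tries MSI-X first, then MSI, then a single INTx vector, and returns the count actually granted or a negative errno. Sketch of the contract (want is a placeholder for the requested maximum):

    /* Kernel-context sketch: */
    int nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_ALL_TYPES);

    if (nvec < 0)
            return nvec;  /* no usable interrupt mechanism */
    /* request_irq() against pci_irq_vector(pdev, 0 .. nvec - 1) */
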
@@ -374,7 +376,8 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+#ifdef CONFIG_PM
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -387,17 +390,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -410,11 +411,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -439,22 +435,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
@@ -465,8 +461,9 @@ static const struct dev_pm_ops aq_pm_ops = {
.restore = aq_pm_resume_restore,
.thaw = aq_pm_thaw,
};
+#endif
-static struct pci_driver aq_pci_ops = {
+static struct pci_driver aq_pci_driver = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
@@ -479,11 +476,11 @@ static struct pci_driver aq_pci_ops = {
int aq_pci_func_register_driver(void)
{
- return pci_register_driver(&aq_pci_ops);
+ return pci_register_driver(&aq_pci_driver);
}
void aq_pci_func_unregister_driver(void)
{
- pci_unregister_driver(&aq_pci_ops);
+ pci_unregister_driver(&aq_pci_driver);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 06de19f63287..0fa0f891c0e0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -51,7 +51,7 @@ struct ptp_tx_timeout {
struct aq_ptp_s {
struct aq_nic_s *aq_nic;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
spinlock_t ptp_lock;
spinlock_t ptp_ring_lock;
struct ptp_clock *ptp_clock;
@@ -553,21 +553,21 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @aq_ptp: pointer to aq_ptp_s struct
- * @skb: particular skb to send timestamp with
+ * @shhwtstamps: skb_shared_hwtstamps struct in which to store the timestamp
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the hwtstamps structure which
* is passed up the network stack
*/
-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
u64 timestamp)
{
timestamp -= atomic_read(&aq_ptp->offset_ingress);
- aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
}
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = aq_ptp->hwtstamp_config;
}
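
struct kernel_hwtstamp_config is the in-kernel mirror of the UAPI struct hwtstamp_config; it reaches drivers through the ndo_hwtstamp_get/ndo_hwtstamp_set net_device ops rather than a copied-in ifreq. A minimal sketch of a get handler forwarding to the helper above, assuming a hypothetical handler name and PTP-presence check (neither is shown in this diff):

    static int aq_ndev_hwtstamp_get(struct net_device *ndev,
    				struct kernel_hwtstamp_config *config)
    {
    	struct aq_nic_s *aq_nic = netdev_priv(ndev);

    	if (!aq_nic->aq_ptp)
    		return -EOPNOTSUPP;

    	aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config);
    	return 0;
    }
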
@@ -588,7 +588,7 @@ static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
}
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
const struct aq_hw_ops *hw_ops;
@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
&aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
p, len, &timestamp);
if (ret > 0)
- aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+ aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
return ret;
}
@@ -953,8 +953,6 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
- struct aq_ring_s *hwts;
- struct aq_ring_s *ring;
int err;
if (!aq_ptp)
@@ -962,29 +960,23 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
- ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
- tx_ring_idx, &aq_nic->aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ err = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
+ tx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (err)
goto err_exit;
- }
rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
- ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
- rx_ring_idx, &aq_nic->aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ err = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
+ rx_ring_idx, &aq_nic->aq_nic_cfg);
+ if (err)
goto err_exit_ptp_tx;
- }
- hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
- aq_nic->aq_nic_cfg.rxds,
- aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
- if (!hwts) {
- err = -ENOMEM;
+ err = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
+ aq_nic->aq_nic_cfg.rxds,
+ aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
+ if (err)
goto err_exit_ptp_rx;
- }
err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
if (err != 0) {
@@ -1001,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
return 0;
err_exit_hwts_rx:
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
@@ -1019,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
@@ -1217,8 +1209,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
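
The dropped fourth argument follows the removal of the weight parameter from netif_napi_add(), which now always uses the default poll weight. A driver that truly needed a custom weight would switch to the explicit variant instead, for example:

    netif_napi_add_weight(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
    		      aq_ptp_poll, NAPI_POLL_WEIGHT);
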
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
index 28ccb7ca2df9..5e643ec7cc06 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -60,14 +60,14 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);
/* PTP availability must be checked before calling these */
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
/* Return whether the ring belongs to PTP or not */
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len);
struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
@@ -130,9 +130,9 @@ static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {}
static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config) {}
+ struct kernel_hwtstamp_config *config) {}
static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return 0;
}
@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
}
static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
- struct sk_buff *skb, u8 *p,
+ struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len)
{
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 24122ccda614..d23d23bed39f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* A refcount of one means the ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -105,8 +132,8 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
return 0;
}
-static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic)
+static int aq_ring_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic)
{
int err = 0;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -128,72 +156,54 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err_exit:
if (err < 0) {
aq_ring_free(self);
- self = NULL;
}
- return self;
+ return err;
}
-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
- int err = 0;
-
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->txds;
self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
- self = aq_ring_alloc(self, aq_nic);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
-err_exit:
- if (err < 0) {
- aq_ring_free(self);
- self = NULL;
- }
-
- return self;
+ return aq_ring_alloc(self, aq_nic);
}
-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg)
+int aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
{
- int err = 0;
-
self->aq_nic = aq_nic;
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
-
- self = aq_ring_alloc(self, aq_nic);
- if (!self) {
- err = -ENOMEM;
- goto err_exit;
- }
-
-err_exit:
- if (err < 0) {
- aq_ring_free(self);
- self = NULL;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
}
- return self;
+ return aq_ring_alloc(self, aq_nic);
}
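
Each XDP rx buffer is laid out as [page_offset | frame_max | tail_size], and the same sum is what later becomes frame_sz for xdp_init_buff(). A standalone sketch of the arithmetic, assuming 4 KiB pages, XDP_PACKET_HEADROOM = 256, a 320-byte aligned skb_shared_info, AQ_CFG_RX_FRAME_MAX = 2048 and AQ_CFG_XDP_PAGEORDER = 2 (all assumed values for illustration):

    #include <stdio.h>

    int main(void)
    {
    	int headroom = 256;		/* AQ_XDP_HEADROOM, assumed */
    	int tailroom = 320;		/* AQ_XDP_TAILROOM, assumed */
    	int frame_max = 2048;		/* AQ_CFG_RX_FRAME_MAX, assumed */
    	int page_run = 4096 << 2;	/* one order-2 page allocation */
    	int frame_sz = headroom + frame_max + tailroom;

    	/* prints: frame_sz=2624 buffers=6 per 16 KiB page run */
    	printf("frame_sz=%d buffers=%d\n", frame_sz, page_run / frame_sz);
    	return 0;
    }
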
-struct aq_ring_s *
+int
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, unsigned int size, unsigned int dx_size)
{
@@ -211,10 +221,10 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
GFP_KERNEL);
if (!self->dx_ring) {
aq_ring_free(self);
- return NULL;
+ return -ENOMEM;
}
- return self;
+ return 0;
}
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
@@ -298,14 +308,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -338,14 +360,175 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+ struct net_device *dev,
+ struct aq_ring_buff_s *buff)
+{
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return NULL;
+
+ skb = xdp_build_skb_from_frame(xdpf, dev);
+ if (!skb)
+ return NULL;
+
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* a single-buffer XDP program cannot handle a multi-buffer packet: abort */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
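
The switch above handles whatever verdict the attached program returns. For illustration only, a minimal XDP program exercising two of those verdicts; since this driver can hand multi-buffer packets to frags-aware programs, such a program would be declared with the frags section name under libbpf conventions (e.g. SEC("xdp.frags")):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_demo(struct xdp_md *ctx)
    {
    	void *data = (void *)(long)ctx->data;
    	void *data_end = (void *)(long)ctx->data_end;

    	/* drop anything shorter than an Ethernet header... */
    	if (data + 14 > data_end)
    		return XDP_DROP;

    	/* ...and bounce the rest back out the receiving port */
    	return XDP_TX;
    }

    char _license[] SEC("license") = "GPL";
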
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ sinfo->xdp_frags_size += buff_->len;
+ skb_frag_fill_page_desc(frag, buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -363,32 +546,49 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
- next_ = buff_->next,
+ bool is_rsc_completed = true;
+
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ frag_cnt++;
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
is_rsc_completed =
aq_ring_dx_in_range(self->sw_head,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
- next_ = buff_->next,
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
@@ -423,7 +623,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
if (is_ptp_ring)
buff->len -=
- aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
aq_buf_vaddr(&buff->rxdata),
buff->len);
@@ -437,16 +637,15 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
@@ -460,7 +659,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -501,6 +700,155 @@ err_exit:
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ u16 ptp_hwtstamp_len = 0;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring) {
+ ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+ buff->len -= ptp_hwtstamp_len;
+ }
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (ptp_hwtstamp_len > 0)
+ *skb_hwtstamps(skb) = shhwtstamps;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to queue 0 */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
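
aq_xdp_locking_key is the static branch steering this dispatch; it gets flipped on the control path when a program is attached or detached. A hedged sketch of that side (helper name and the xchg-based swap are assumptions; the real setup code is not part of this hunk):

    DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);

    static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog)
    {
    	struct aq_nic_s *aq_nic = netdev_priv(ndev);
    	struct bpf_prog *old_prog = xchg(&aq_nic->xdp_prog, prog);

    	if (prog && !old_prog)
    		static_branch_inc(&aq_xdp_locking_key);
    	else if (!prog && old_prog)
    		static_branch_dec(&aq_xdp_locking_key);

    	if (old_prog)
    		bpf_prog_put(old_prog);

    	return 0;
    }
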
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -520,7 +868,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -534,9 +881,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -567,11 +914,27 @@ void aq_ring_free(struct aq_ring_s *self)
return;
kfree(self->buff_ring);
+ self->buff_ring = NULL;
- if (self->dx_ring)
+ if (self->dx_ring) {
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
+
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+ self->dx_ring, self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
@@ -583,7 +946,7 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.rx.syncp);
data[count] = self->stats.rx.packets;
data[++count] = self->stats.rx.jumbo_packets;
data[++count] = self->stats.rx.lro_packets;
@@ -591,15 +954,24 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
- } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
+ } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.tx.syncp);
data[count] = self->stats.tx.packets;
data[++count] = self->stats.tx.queue_restarts;
- } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+ } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
}
return ++count;
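
The _irq suffixes disappear because the u64_stats reader API was consolidated; the begin/retry loop itself is unchanged. A minimal reader following the same pattern (hypothetical helper, shown only for clarity):

    static u64 aq_ring_rx_packets_snapshot(struct aq_ring_s *ring)
    {
    	unsigned int start;
    	u64 packets;

    	do {
    		start = u64_stats_fetch_begin(&ring->stats.rx.syncp);
    		packets = ring->stats.rx.packets;
    	} while (u64_stats_fetch_retry(&ring->stats.rx.syncp, start));

    	return packets;
    }
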
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..d627ace850ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -167,14 +183,15 @@ static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
self->sw_head - self->sw_tail - 1);
}
-struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg);
-struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic,
- unsigned int idx,
- struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,15 +199,18 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
-struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
- struct aq_nic_s *aq_nic, unsigned int idx,
- unsigned int size, unsigned int dx_size);
+int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic, unsigned int idx,
+ unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d281322d7dd2..9769ab4f9bef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -43,8 +38,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -124,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
@@ -142,24 +136,35 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
i, idx);
- ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
- idx_ring, aq_nic_cfg);
- if (!ring) {
- err = -ENOMEM;
+ ring = &self->ring[i][AQ_VEC_TX_ID];
+ err = aq_ring_tx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
+ if (err)
goto err_exit;
- }
++self->tx_rings;
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
- ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
- idx_ring, aq_nic_cfg);
- if (!ring) {
+ ring = &self->ring[i][AQ_VEC_RX_ID];
+ if (xdp_rxq_info_reg(&ring->xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
+ err = aq_ring_rx_alloc(ring, aq_nic, idx_ring, aq_nic_cfg);
+ if (err) {
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ goto err_exit;
+ }
+
++self->rx_rings;
}
@@ -182,8 +187,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +229,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +253,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +273,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,11 +302,13 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
@@ -362,9 +369,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
{
unsigned int count;
- WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
- "Invalid tc %u (#rx=%u, #tx=%u)\n",
- tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..8de2cdd09213 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -352,7 +352,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..c7895bfb2ecf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -562,7 +562,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
@@ -969,15 +976,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
@@ -1191,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
- int err;
- u32 val;
-
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
-
- err = aq_hw_err_from_flags(self);
-
- if (err)
- goto err_exit;
-
- readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
- self, val, val == 1, 1000U, 10000U);
-
-err_exit:
- return err;
+ return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
@@ -1647,6 +1637,137 @@ static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
return 0;
}
+#define START_TRANSMIT 0x5001
+#define START_READ_TRANSMIT 0x5101
+#define STOP_TRANSMIT 0x3001
+#define REPEAT_TRANSMIT 0x1001
+#define REPEAT_NACK_TRANSMIT 0x1011
+
+static int hw_atl_b0_smb0_wait_result(struct aq_hw_s *self, bool expect_ack)
+{
+ int err;
+ u32 val;
+
+ err = readx_poll_timeout(hw_atl_smb0_byte_transfer_complete_get,
+ self, val, val == 1, 100U, 10000U);
+ if (err)
+ return err;
+ if (hw_atl_smb0_receive_acknowledged_get(self) != expect_ack)
+ return -EIO;
+ return 0;
+}
+
+/* Starts an I2C/SMBUS write to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_write(struct aq_hw_s *self, u32 addr)
+{
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 0);
+ hw_atl_smb0_provisioning2_set(self, START_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Writes a single byte as part of an ongoing write started by start_write. */
+static int hw_atl_b0_smb0_write_byte(struct aq_hw_s *self, u32 data)
+{
+ hw_atl_smb0_tx_data_set(self, data);
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ return hw_atl_b0_smb0_wait_result(self, 0);
+}
+
+/* Starts an I2C/SMBUS read to a given address. addr is in 7-bit format,
+ * the read/write bit is not part of it.
+ */
+static int hw_atl_b0_smb0_start_read(struct aq_hw_s *self, u32 addr)
+{
+ int err;
+
+ hw_atl_smb0_tx_data_set(self, (addr << 1) | 1);
+ hw_atl_smb0_provisioning2_set(self, START_READ_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ if (hw_atl_smb0_repeated_start_detect_get(self) == 0)
+ return -EIO;
+ return 0;
+}
+
+/* Reads a single byte as part of an ongoing read started by start_read. */
+static int hw_atl_b0_smb0_read_byte(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 0);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Reads the last byte of an ongoing read. */
+static int hw_atl_b0_smb0_read_byte_nack(struct aq_hw_s *self)
+{
+ int err;
+
+ hw_atl_smb0_provisioning2_set(self, REPEAT_NACK_TRANSMIT);
+ err = hw_atl_b0_smb0_wait_result(self, 1);
+ if (err)
+ return err;
+ return hw_atl_smb0_rx_data_get(self);
+}
+
+/* Sends a stop condition and ends a transfer. */
+static void hw_atl_b0_smb0_stop(struct aq_hw_s *self)
+{
+ hw_atl_smb0_provisioning2_set(self, STOP_TRANSMIT);
+}
+
+static int hw_atl_b0_read_module_eeprom(struct aq_hw_s *self, u8 dev_addr,
+ u8 reg_start_addr, int len, u8 *data)
+{
+ int i, b;
+ int err;
+ u32 val;
+
+ /* Wait for SMBUS0 to be idle */
+ err = readx_poll_timeout(hw_atl_smb0_bus_busy_get, self,
+ val, val == 0, 100U, 10000U);
+ if (err)
+ return err;
+
+ err = hw_atl_b0_smb0_start_write(self, dev_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_write_byte(self, reg_start_addr);
+ if (err)
+ goto out;
+
+ err = hw_atl_b0_smb0_start_read(self, dev_addr);
+ if (err)
+ goto out;
+
+ for (i = 0; i < len - 1; i++) {
+ b = hw_atl_b0_smb0_read_byte(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+ }
+
+ b = hw_atl_b0_smb0_read_byte_nack(self);
+ if (b < 0) {
+ err = b;
+ goto out;
+ }
+ data[i] = (u8)b;
+
+out:
+ hw_atl_b0_smb0_stop(self);
+ return err;
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
@@ -1705,4 +1826,5 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_fc = hw_atl_b0_set_fc,
.hw_get_mac_temp = hw_atl_b0_get_mac_temp,
+ .hw_read_module_eeprom = hw_atl_b0_read_module_eeprom,
};
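
hw_atl_b0_read_module_eeprom implements the classic SFP sequence: wait for an idle bus, address the device in write mode, send the register offset, issue a repeated-start read, and NACK the last byte before the stop condition. A sketch of ethtool-side glue that could consume the new op (function name, the 0x50 SFP ID address and the truncation of ee->offset are assumptions; the actual plumbing is outside this diff):

    static int aq_get_module_eeprom(struct net_device *ndev,
    				struct ethtool_eeprom *ee, u8 *data)
    {
    	struct aq_nic_s *aq_nic = netdev_priv(ndev);
    	const struct aq_hw_ops *ops = aq_nic->aq_hw_ops;

    	if (!ops->hw_read_module_eeprom)
    		return -EOPNOTSUPP;

    	/* 0x50 is the standard SFP ID EEPROM 7-bit address */
    	return ops->hw_read_module_eeprom(aq_nic->aq_hw, 0x50,
    					  (u8)ee->offset, ee->len, data);
    }
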
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 7b67bdd8a258..d07af1271d59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -57,6 +57,49 @@ u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
HW_ATL_TS_DATA_OUT_SHIFT);
}
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BUS_BUSY_ADR,
+ HW_ATL_SMB0_BUS_BUSY_MSK,
+ HW_ATL_SMB0_BUS_BUSY_SHIFT);
+}
+
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK,
+ HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT);
+}
+
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK,
+ HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT);
+}
+
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, HW_ATL_SMB0_REPEATED_START_DETECT_ADR,
+ HW_ATL_SMB0_REPEATED_START_DETECT_MSK,
+ HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT);
+}
+
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, HW_ATL_SMB0_RECEIVED_DATA_ADR);
+}
+
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_TRANSMITTED_DATA_ADR, data);
+}
+
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data)
+{
+ return aq_hw_write_reg(aq_hw, HW_ATL_SMB0_PROVISIONING2_ADR, data);
+}
+
/* global */
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 58f5ee0a6214..5fd506acacb5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -34,6 +34,27 @@ u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
/* get temperature sense data */
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
+/* SMBUS0 bus busy */
+u32 hw_atl_smb0_bus_busy_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 byte transfer complete */
+u32 hw_atl_smb0_byte_transfer_complete_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 receive acknowledged */
+u32 hw_atl_smb0_receive_acknowledged_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 set transmitted data (only leftmost byte of data valid) */
+void hw_atl_smb0_tx_data_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 provisioning2 command register */
+void hw_atl_smb0_provisioning2_set(struct aq_hw_s *aq_hw, u32 data);
+
+/* SMBUS0 repeated start detect */
+u32 hw_atl_smb0_repeated_start_detect_get(struct aq_hw_s *aq_hw);
+
+/* SMBUS0 received data register */
+u32 hw_atl_smb0_rx_data_get(struct aq_hw_s *aq_hw);
+
/* global */
/* set global microprocessor semaphore */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 4a6467031b9e..fce30d90b6cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -42,6 +42,38 @@
#define HW_ATL_TS_DATA_OUT_SHIFT 0
#define HW_ATL_TS_DATA_OUT_WIDTH 12
+/* SMBUS0 Received Data register */
+#define HW_ATL_SMB0_RECEIVED_DATA_ADR 0x00000748
+/* SMBUS0 Transmitted Data register */
+#define HW_ATL_SMB0_TRANSMITTED_DATA_ADR 0x00000608
+
+/* SMBUS0 Global Provisioning 2 register */
+#define HW_ATL_SMB0_PROVISIONING2_ADR 0x00000604
+
+/* SMBUS0 Bus Busy Bitfield Definitions */
+#define HW_ATL_SMB0_BUS_BUSY_ADR 0x00000744
+#define HW_ATL_SMB0_BUS_BUSY_MSK 0x00000080
+#define HW_ATL_SMB0_BUS_BUSY_SHIFT 7
+#define HW_ATL_SMB0_BUS_BUSY_WIDTH 1
+
+/* SMBUS0 Byte Transfer Complete Bitfield Definitions */
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_ADR 0x00000744
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_MSK 0x00000002
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_SHIFT 1
+#define HW_ATL_SMB0_BYTE_TRANSFER_COMPLETE_WIDTH 1
+
+/* SMBUS0 Receive Acknowledge Bitfield Definitions */
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_ADR 0x00000744
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_MSK 0x00000100
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_SHIFT 8
+#define HW_ATL_SMB0_RX_ACKNOWLEDGED_WIDTH 1
+
+/* SMBUS0 Repeated Start Detect Bitfield Definitions */
+#define HW_ATL_SMB0_REPEATED_START_DETECT_ADR 0x00000744
+#define HW_ATL_SMB0_REPEATED_START_DETECT_MSK 0x00000004
+#define HW_ATL_SMB0_REPEATED_START_DETECT_SHIFT 2
+#define HW_ATL_SMB0_REPEATED_START_DETECT_WIDTH 1
+
/* global microprocessor semaphore definitions
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index fc0e66006644..7e88d7234b14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
if (fw.len == 0xFFFFU) {
+ if (sw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err = hw_atl_utils_fw_rpc_call(self, sw.len);
if (err < 0)
goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (rpc) {
if (fw.len) {
+ if (fw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err =
hw_atl_utils_fw_downld_dwords(self,
self->rpc_addr,
@@ -857,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
struct hw_atl_utils_mbox mbox;
+ bool corrupted_stats = false;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
- mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+ curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
@@ -882,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(bbrc);
AQ_SDELTA(bbtc);
AQ_SDELTA(dpc);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
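
The reworked AQ_SDELTA rejects any window in which a firmware counter moved backwards (as after a firmware restart): the unsigned difference is reinterpreted as s64, and the whole snapshot is committed only if every delta was non-negative. A standalone illustration of the guard, assuming 64-bit counters:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t last = 4000000000ull;	/* previous firmware sample */
    	uint64_t now = 100;		/* counter was reset mid-flight */
    	int64_t delta = (int64_t)(now - last);

    	if (delta >= 0)
    		printf("apply delta %lld\n", (long long)delta);
    	else
    		printf("negative delta, sample treated as corrupted\n");
    	return 0;
    }
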
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index f5901f8e3907..f6b990b7f5b4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -226,7 +226,6 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index eac631c45c56..4d4cfbc91e19 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5G)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_5GSR)
- rate |= FW2X_RATE_5G;
-
if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index c98708bb044c..0ce9caae8799 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
- AQ_NIC_RATE_1G_HALF |
AQ_NIC_RATE_100M |
- AQ_NIC_RATE_100M_HALF |
- AQ_NIC_RATE_10M |
- AQ_NIC_RATE_10M_HALF,
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
};
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
@@ -79,7 +93,7 @@ static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
int err;
err = hw_atl2_utils_soft_reset(self);
@@ -364,8 +378,8 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ struct hw_atl2_priv *priv = self->priv;
u16 action;
u8 index;
int i;
@@ -419,7 +433,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
u16 off_action = (!promisc &&
!hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u8 index;
index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
@@ -431,7 +445,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
bool vlan_promisc_enable;
u8 index;
@@ -520,13 +534,13 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ struct hw_atl2_priv *priv = self->priv;
u8 base_index, count;
int err;
@@ -745,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
- return 0;
+ return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
@@ -756,7 +770,7 @@ static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
struct aq_rx_filter_vlan *aq_vlans)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u32 queue;
u8 index;
int i;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
index de8723f1c28a..346f0dc9912e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -9,6 +9,8 @@
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
index b66fa346581c..6bad64c77b87 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -239,7 +239,8 @@ struct version_s {
u8 minor;
u16 build;
} phy;
- u32 rsvd;
+ u32 drv_iface_ver:4;
+ u32 rsvd:28;
};
struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
u16 rsvd2;
};
-struct statistics_s {
+struct statistics_a0_s {
struct {
u32 link_up;
u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
u32 reserve_fw_gap;
};
+struct __packed statistics_b0_s {
+ u64 rx_good_octets;
+ u64 rx_pause_frames;
+ u64 rx_good_frames;
+ u64 rx_errors;
+ u64 rx_unicast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_broadcast_frames;
+
+ u64 tx_good_octets;
+ u64 tx_pause_frames;
+ u64 tx_good_frames;
+ u64 tx_errors;
+ u64 tx_unicast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_broadcast_frames;
+
+ u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+ union __packed {
+ struct statistics_a0_s a0;
+ struct statistics_b0_s b0;
+ };
+};
+
struct filter_caps_s {
u8 l2_filters_base_index:6;
u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
u32 rsvd5;
};
-struct fw_interface_out {
+struct __packed fw_interface_out {
struct transaction_counter_s transaction_id;
struct version_s version;
struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
struct core_dump_s core_dump;
u32 rsvd11;
struct statistics_s stats;
- u32 rsvd12;
struct filter_caps_s filter_caps;
struct device_caps_s device_caps;
u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U
+#define AQ_A2_FW_INTERFACE_A0 0
+#define AQ_A2_FW_INTERFACE_B0 1
+
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index dd259c8f2f4f..7370e3f76b62 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
if (cnt > AQ_A2_FW_READ_TRY_MAX)
return -ETIME;
if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
- udelay(1);
+ mdelay(1);
} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
{
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
- link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_N5G = link_options->rate_5G;
link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
rate |= AQ_NIC_RATE_10G;
if (lkp_link_caps->rate_5G)
rate |= AQ_NIC_RATE_5G;
- if (lkp_link_caps->rate_N5G)
- rate |= AQ_NIC_RATE_5GSR;
if (lkp_link_caps->rate_2P5G)
rate |= AQ_NIC_RATE_2G5;
if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return 0;
}
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
- struct statistics_s stats;
-
- hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
- stats.msm._F_ - priv->last_stats.msm._F_)
+ struct hw_atl2_priv *priv = self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+ curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
AQ_SDELTA(mbtc, tx_multicast_octets);
AQ_SDELTA(bbrc, rx_broadcast_octets);
AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc =
- hw_atl_stats_rx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_pkt_tc =
- hw_atl_stats_tx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_oct_rc =
- hw_atl_stats_rx_dma_good_octet_counter_get(self);
- self->curr_stats.dma_oct_tc =
- hw_atl_stats_tx_dma_good_octet_counter_get(self);
- self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+ curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_errors);
+ AQ_SDELTA(brc, rx_good_octets);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+ AQ_SDELTA(btc, tx_good_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl2_priv *priv = self->priv;
+ struct statistics_s stats;
+ struct version_s version;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+ if (err)
+ return err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+ if (err)
+ return err;
+
+ if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+ aq_a2_fill_a0_stats(self, &stats);
+ else
+ aq_a2_fill_b0_stats(self, &stats);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
memcpy(&priv->last_stats, &stats, sizeof(stats));
@@ -398,6 +462,44 @@ static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
return aq_a2_fw_get_phy_temp(self, temp);
}
+static int aq_a2_fw_set_wol_params(struct aq_hw_s *self, const u8 *mac, u32 wol)
+{
+ struct mac_address_aligned_s mac_address;
+ struct link_control_s link_control;
+ struct wake_on_lan_s wake_on_lan;
+
+ memcpy(mac_address.aligned.mac_address, mac, ETH_ALEN);
+ hw_atl2_shared_buffer_write(self, mac_address, mac_address);
+
+ memset(&wake_on_lan, 0, sizeof(wake_on_lan));
+
+ if (wol & WAKE_MAGIC)
+ wake_on_lan.wake_on_magic_packet = 1U;
+
+ if (wol & (WAKE_PHY | AQ_FW_WAKE_ON_LINK_RTPM))
+ wake_on_lan.wake_on_link_up = 1U;
+
+ hw_atl2_shared_buffer_write(self, sleep_proxy, wake_on_lan);
+
+ hw_atl2_shared_buffer_get(self, link_control, link_control);
+ link_control.mode = AQ_HOST_MODE_SLEEP_PROXY;
+ hw_atl2_shared_buffer_write(self, link_control, link_control);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_set_power(struct aq_hw_s *self, unsigned int power_state,
+ const u8 *mac)
+{
+ u32 wol = self->aq_nic_cfg->wol;
+ int err = 0;
+
+ if (wol)
+ err = aq_a2_fw_set_wol_params(self, mac, wol);
+
+ return err;
+}
+
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
@@ -499,9 +601,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
- return version.mac.major << 24 |
- version.mac.minor << 16 |
- version.mac.build;
+ return version.bundle.major << 24 |
+ version.bundle.minor << 16 |
+ version.bundle.build;
}
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
@@ -541,6 +643,7 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
+ .set_power = aq_a2_fw_set_power,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
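aq_a2_fw_update_stats() above now reads the version word first and
dispatches to a layout-specific fill helper; both helpers share the
guarded delta pattern of AQ_SDELTA, where deltas are applied to a local
copy of the totals and committed in a single assignment only if no
delta went negative. A torn or corrupted firmware snapshot therefore
cannot poison the running counters. Condensed sketch with illustrative
field names:

	struct totals { u64 rx_packets; u64 tx_packets; };
	struct snap   { u64 rx_good_frames; u64 tx_good_frames; };

	static void apply_snapshot(struct totals *cur,
				   const struct snap *now,
				   const struct snap *last)
	{
		struct totals tmp = *cur;
		bool corrupted = false;

	#define SDELTA(_N, _F)						\
	do {								\
		if (!corrupted && (s64)(now->_F - last->_F) >= 0)	\
			tmp._N += now->_F - last->_F;			\
		else							\
			corrupted = true;				\
	} while (0)

		SDELTA(rx_packets, rx_good_frames);
		SDELTA(tx_packets, tx_good_frames);
	#undef SDELTA

		if (!corrupted)
			*cur = tmp;	/* commit the whole snapshot */
	}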
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf05630a..431924959520 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
u16 table_index)
{
u16 packed_record[18];
+ int ret;
if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
packed_record[16] = rec->key_len & 0x3;
- return set_raw_ingress_record(hw, packed_record, 18, 2,
- ROWOFFSET_INGRESSSAKEYRECORD +
- table_index);
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
ret = set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index);
if (unlikely(ret))
- return ret;
+ goto clear_key;
ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index -
32);
- if (unlikely(ret))
- return ret;
- return 0;
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
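Both sakey paths above scrub the on-stack copy of the key material
before returning, including on the error path (hence the goto
clear_key rework). memzero_explicit() exists precisely for this: a
plain memset() of an object that is dead afterwards may legally be
optimized away, while the explicit variant may not. Shape of the
pattern, with pack_key() and write_raw_record() as illustrative
stand-ins:

	static int write_key_record(struct hw *hw, const struct key *rec)
	{
		u16 packed[18];
		int ret;

		pack_key(packed, rec);
		ret = write_raw_record(hw, packed, ARRAY_SIZE(packed));

		memzero_explicit(packed, sizeof(packed)); /* always scrub */
		return ret;
	}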
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 0a67612af228..0d400a7d8d91 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -23,16 +23,6 @@ config ARC_EMAC_CORE
select PHYLIB
select CRC32
-config ARC_EMAC
- tristate "ARC EMAC support"
- select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on ARC || COMPILE_TEST
- help
- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
- non-standard on-chip ethernet device ARC EMAC 10/100 is used.
- Say Y here if you have such a board. If unsure, say N.
-
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index d63ada577c8e..23586eefec44 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -5,5 +5,4 @@
arc_emac-objs := emac_main.o emac_mdio.o
obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
-obj-$(CONFIG_ARC_EMAC) += emac_arc.o
obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index d820ae03a966..0e244f0e25fd 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -220,6 +220,6 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
int arc_mdio_probe(struct arc_emac_priv *priv);
int arc_mdio_remove(struct arc_emac_priv *priv);
int arc_emac_probe(struct net_device *ndev, int interface);
-int arc_emac_remove(struct net_device *ndev);
+void arc_emac_remove(struct net_device *ndev);
#endif /* ARC_EMAC_H */
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
deleted file mode 100644
index 800620b8f10d..000000000000
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * DOC: emac_arc.c - ARC EMAC specific glue layer
- *
- * Copyright (C) 2014 Romain Perier
- *
- * Romain Perier <romain.perier@gmail.com>
- */
-
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/of_net.h>
-#include <linux/platform_device.h>
-
-#include "emac.h"
-
-#define DRV_NAME "emac_arc"
-
-static int emac_arc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct arc_emac_priv *priv;
- phy_interface_t interface;
- struct net_device *ndev;
- int err;
-
- if (!dev->of_node)
- return -ENODEV;
-
- ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
- if (!ndev)
- return -ENOMEM;
- platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, dev);
-
- priv = netdev_priv(ndev);
- priv->drv_name = DRV_NAME;
-
- err = of_get_phy_mode(dev->of_node, &interface);
- if (err) {
- if (err == -ENODEV)
- interface = PHY_INTERFACE_MODE_MII;
- else
- goto out_netdev;
- }
-
- priv->clk = devm_clk_get(dev, "hclk");
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to retrieve host clock from device tree\n");
- err = -EINVAL;
- goto out_netdev;
- }
-
- err = arc_emac_probe(ndev, interface);
-out_netdev:
- if (err)
- free_netdev(ndev);
- return err;
-}
-
-static int emac_arc_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- int err;
-
- err = arc_emac_remove(ndev);
- free_netdev(ndev);
- return err;
-}
-
-static const struct of_device_id emac_arc_dt_ids[] = {
- { .compatible = "snps,arc-emac" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
-
-static struct platform_driver emac_arc_driver = {
- .probe = emac_arc_probe,
- .remove = emac_arc_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = emac_arc_dt_ids,
- },
-};
-
-module_platform_driver(emac_arc_driver);
-
-MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
-MODULE_DESCRIPTION("ARC EMAC platform driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index c642c3d3e600..8283aeee35fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -15,11 +15,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include "emac.h"
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
@@ -111,6 +111,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -140,7 +141,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
stats->tx_bytes += skb->len;
}
- dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
/* return the sk_buff to system */
@@ -174,6 +175,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
static int arc_emac_rx(struct net_device *ndev, int budget)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int work_done;
for (work_done = 0; work_done < budget; work_done++) {
@@ -223,9 +225,9 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data,
+ addr = dma_map_single(dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
if (net_ratelimit())
netdev_err(ndev, "cannot map dma buffer\n");
dev_kfree_skb(skb);
@@ -237,7 +239,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/* unmap previosly mapped skb */
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_single(dev, dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
pktlen = info & LEN_MASK;
@@ -423,6 +425,7 @@ static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = ndev->phydev;
+ struct device *dev = ndev->dev.parent;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -445,9 +448,9 @@ static int arc_emac_open(struct net_device *ndev)
if (unlikely(!rx_buff->skb))
return -ENOMEM;
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(dev, (void *)rx_buff->skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
netdev_err(ndev, "cannot dma map\n");
dev_kfree_skb(rx_buff->skb);
return -ENOMEM;
@@ -548,6 +551,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
static void arc_free_tx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < TX_BD_NUM; i++) {
@@ -555,7 +559,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
struct buffer_state *tx_buff = &priv->tx_buff[i];
if (tx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(tx_buff, addr),
dma_unmap_len(tx_buff, len),
DMA_TO_DEVICE);
@@ -579,6 +583,7 @@ static void arc_free_tx_queue(struct net_device *ndev)
static void arc_free_rx_queue(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = ndev->dev.parent;
unsigned int i;
for (i = 0; i < RX_BD_NUM; i++) {
@@ -586,7 +591,7 @@ static void arc_free_rx_queue(struct net_device *ndev)
struct buffer_state *rx_buff = &priv->rx_buff[i];
if (rx_buff->skb) {
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(dev,
dma_unmap_addr(rx_buff, addr),
dma_unmap_len(rx_buff, len),
DMA_FROM_DEVICE);
@@ -679,6 +684,7 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
unsigned int len, *txbd_curr = &priv->txbd_curr;
struct net_device_stats *stats = &ndev->stats;
__le32 *info = &priv->txbd[*txbd_curr].info;
+ struct device *dev = ndev->dev.parent;
dma_addr_t addr;
if (skb_padto(skb, ETH_ZLEN))
@@ -692,10 +698,9 @@ static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
- DMA_TO_DEVICE);
+ addr = dma_map_single(dev, (void *)skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
+ if (unlikely(dma_mapping_error(dev, addr))) {
stats->tx_dropped++;
stats->tx_errors++;
dev_kfree_skb_any(skb);
@@ -981,7 +986,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
dev_info(dev, "connected to %s phy with id 0x%x\n",
phydev->drv->name, phydev->phy_id);
- netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll,
+ ARC_EMAC_NAPI_WEIGHT);
err = register_netdev(ndev);
if (err) {
@@ -1007,7 +1013,7 @@ out_put_node:
}
EXPORT_SYMBOL_GPL(arc_emac_probe);
-int arc_emac_remove(struct net_device *ndev)
+void arc_emac_remove(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
@@ -1018,8 +1024,6 @@ int arc_emac_remove(struct net_device *ndev)
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
-
- return 0;
}
EXPORT_SYMBOL_GPL(arc_emac_remove);
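The recurring dma_map_single()/dma_unmap_single() change in this file
is a correctness fix, not a cleanup: the struct device embedded in a
net_device is a class device with no dma_mask or DMA ops, so mapping
against &ndev->dev only ever worked by accident. The DMA API must be
handed the bus device that was actually probed, which is
ndev->dev.parent here. Minimal sketch of the corrected pattern:

	static int map_rx_buf(struct net_device *ndev, void *buf,
			      size_t len, dma_addr_t *addr)
	{
		struct device *dev = ndev->dev.parent; /* platform dev */

		*addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
	}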
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 9acf589b1178..078b1a72c161 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -132,6 +132,8 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
+ const char *name = "Synopsys MII Bus";
+ struct device_node *mdio_node;
struct mii_bus *bus;
int error;
@@ -142,7 +144,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus";
+ bus->name = name;
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
@@ -163,11 +165,17 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
- error = of_mdiobus_register(bus, priv->dev->of_node);
+ /* Backwards compatibility for EMAC nodes without MDIO subnode. */
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ mdio_node = of_node_get(np);
+
+ error = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
- "cannot register MDIO bus %s\n", bus->name);
+ "cannot register MDIO bus %s\n", name);
}
return 0;
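The registration hunk keeps device-node refcounting uniform across
both paths: when the optional "mdio" subnode is absent, of_node_get()
takes an extra reference on the EMAC node itself, so the single
of_node_put() after of_mdiobus_register() is balanced either way:

	struct device_node *mdio_np = of_get_child_by_name(np, "mdio");

	if (!mdio_np)
		mdio_np = of_node_get(np);   /* +1 ref, matches the put */

	error = of_mdiobus_register(bus, mdio_np);
	of_node_put(mdio_np);                /* drops either reference */

Without the of_node_get() fallback, the put would underflow the EMAC
node's refcount on old device trees that lack an mdio subnode.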
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 1c9ca3bcb871..780e70ea1c22 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -244,13 +244,12 @@ out_netdev:
return err;
}
-static int emac_rockchip_remove(struct platform_device *pdev)
+static void emac_rockchip_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct rockchip_priv_data *priv = netdev_priv(ndev);
- int err;
- err = arc_emac_remove(ndev);
+ arc_emac_remove(ndev);
clk_disable_unprepare(priv->refclk);
@@ -261,7 +260,6 @@ static int emac_rockchip_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->macclk);
free_netdev(ndev);
- return err;
}
static struct platform_driver emac_rockchip_driver = {
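arc_emac_remove() and emac_rockchip_remove() are converted from int to
void as part of the tree-wide push to make remove callbacks
infallible: the driver core never acted on the returned error, and a
driver has to finish tearing down regardless. Sketch of the void form
for a platform driver, with my_hw_teardown() as an illustrative helper:

	static void my_remove(struct platform_device *pdev)
	{
		struct net_device *ndev = platform_get_drvdata(pdev);

		unregister_netdev(ndev);
		my_hw_teardown(ndev);	/* must not fail */
		free_netdev(ndev);	/* nothing left to return */
	}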
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
index 916ae380a004..7d2fe2e5af92 100644
--- a/drivers/net/ethernet/asix/ax88796c_ioctl.c
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -24,7 +24,7 @@ static void
ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
- strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static u32 ax88796c_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 4b0c5a09fd57..11e8996b33d7 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev)
static void ax88796c_load_mac_addr(struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 addr[ETH_ALEN];
u16 temp;
lockdep_assert_held(&ax_local->spi_lock);
/* Try the device tree first */
- if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
is_valid_ether_addr(ndev->dev_addr)) {
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
@@ -159,18 +160,19 @@ static void ax88796c_load_mac_addr(struct net_device *ndev)
/* Read the MAC address from AX88796C */
temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
- ndev->dev_addr[5] = (u8)temp;
- ndev->dev_addr[4] = (u8)(temp >> 8);
+ addr[5] = (u8)temp;
+ addr[4] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
- ndev->dev_addr[3] = (u8)temp;
- ndev->dev_addr[2] = (u8)(temp >> 8);
+ addr[3] = (u8)temp;
+ addr[2] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
- ndev->dev_addr[1] = (u8)temp;
- ndev->dev_addr[0] = (u8)(temp >> 8);
+ addr[1] = (u8)temp;
+ addr[0] = (u8)(temp >> 8);
- if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
"MAC address read from ASIX chip\n");
@@ -282,7 +284,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
ax88796c_proc_tx_hdr(&info, skb->ip_summed);
/* SOP and SEG header */
- memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.tx_overhead, TX_OVERHEAD);
/* Write SPI TXQ header */
memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
@@ -291,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -379,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
@@ -431,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local,
netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
- status = netif_rx_ni(skb);
+ status = netif_rx(skb);
if (status != NET_RX_SUCCESS && net_ratelimit())
netif_info(ax_local, rx_err, ndev,
"netif_rx status %d\n", status);
@@ -660,12 +662,12 @@ static void ax88796c_get_stats64(struct net_device *ndev,
s = per_cpu_ptr(ax_local->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
rx_packets = u64_stats_read(&s->rx_packets);
rx_bytes = u64_stats_read(&s->rx_bytes);
tx_packets = u64_stats_read(&s->tx_packets);
tx_bytes = u64_stats_read(&s->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -934,7 +936,7 @@ static const struct net_device_ops ax88796c_netdev_ops = {
.ndo_stop = ax88796c_close,
.ndo_start_xmit = ax88796c_start_xmit,
.ndo_get_stats64 = ax88796c_get_stats64,
- .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_eth_ioctl = ax88796c_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ax88796c_set_features,
};
@@ -1004,7 +1006,7 @@ static int ax88796c_probe(struct spi_device *spi)
ax_local->mdiobus->parent = &spi->dev;
snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
- "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi_get_chipselect(spi, 0));
ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
if (ret < 0) {
@@ -1100,7 +1102,7 @@ err:
return ret;
}
-static int ax88796c_remove(struct spi_device *spi)
+static void ax88796c_remove(struct spi_device *spi)
{
struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
struct net_device *ndev = ax_local->ndev;
@@ -1110,15 +1112,15 @@ static int ax88796c_remove(struct spi_device *spi)
netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
dev_driver_string(&spi->dev),
dev_name(&spi->dev));
-
- return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id ax88796c_dt_ids[] = {
{ .compatible = "asix,ax88796c" },
{},
};
MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+#endif
static const struct spi_device_id asix_id[] = {
{ "ax88796c", 0 },
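The get_stats64 hunk above drops the _irq suffix: the u64_stats
helpers were consolidated so that u64_stats_fetch_begin() and
u64_stats_fetch_retry() are the correct API in every context. The
reader simply loops until it observes a consistent snapshot of the
per-cpu counters:

	unsigned int start;
	u64 rx_packets, rx_bytes;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		rx_packets = u64_stats_read(&s->rx_packets);
		rx_bytes   = u64_stats_read(&s->rx_bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));

On 64-bit kernels the seqcount compiles away entirely; the loop only
costs anything where 64-bit counters cannot be read atomically.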
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
index 80263c3cef75..68a09edecab8 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.h
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -25,7 +25,7 @@
#define AX88796C_PHY_REGDUMP_LEN 14
#define AX88796C_PHY_ID 0x10
-#define TX_OVERHEAD 8
+#define TX_OVERHEAD sizeof_field(struct tx_pkt_info, tx_overhead)
#define TX_EOP_SIZE 4
#define AX_MCAST_FILTER_SIZE 8
@@ -127,9 +127,9 @@ struct ax88796c_device {
#define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
unsigned long flags;
- #define EVENT_INTR BIT(0)
- #define EVENT_TX BIT(1)
- #define EVENT_SET_MULTI BIT(2)
+ #define EVENT_INTR 0
+ #define EVENT_TX 1
+ #define EVENT_SET_MULTI 2
};
@@ -549,8 +549,10 @@ struct tx_eop_header {
};
struct tx_pkt_info {
- struct tx_sop_header sop;
- struct tx_segment_header seg;
+ struct_group(tx_overhead,
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ );
struct tx_eop_header eop;
u16 pkt_len;
u16 seq_num;
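Two subtle fixes land in this header. TX_OVERHEAD is now derived with
struct_group()/sizeof_field(), so the memcpy of the SOP and SEG
headers in ax88796c_tx_fixup() is one bounds-checkable access instead
of a write that FORTIFY_SOURCE would flag for crossing member
boundaries. And the EVENT_* constants change from BIT(n) masks to
plain bit numbers, because set_bit(), test_bit() and friends take a
bit index, not a mask; mask-style constants land on the wrong bits
(BIT(1) selects bit 2, not bit 1) and stay collision-free only by luck:

	#define EVENT_INTR	0	/* bit numbers, not masks */
	#define EVENT_TX	1
	#define EVENT_SET_MULTI	2

	set_bit(EVENT_TX, &ax_local->flags);		/* sets bit 1 */
	if (test_and_clear_bit(EVENT_TX, &ax_local->flags))
		;	/* deferred TX handling runs here */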
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
index 94df4f96d2be..0710e716d682 100644
--- a/drivers/net/ethernet/asix/ax88796c_spi.c
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
/* OP */
ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
- ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3);
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
if (ret)
dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
else
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 482c58c4c584..bec5cdf8d1da 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_ATHEROS
bool "Atheros devices"
default y
- depends on (PCI || ATH79)
+ depends on PCI || ATH79 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
- depends on ATH79
+ depends on ATH79 || COMPILE_TEST
select PHYLINK
imply NET_SELFTESTS
help
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..cbc730c7cff2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -29,9 +29,10 @@
#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -148,11 +149,11 @@
#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
#define FIFO_CFG4_DR BIT(10) /* Dribble */
-#define FIFO_CFG4_LE BIT(11) /* Long Event */
-#define FIFO_CFG4_CF BIT(12) /* Control Frame */
-#define FIFO_CFG4_PF BIT(13) /* Pause Frame */
-#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
-#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
+#define FIFO_CFG4_CF BIT(11) /* Control Frame */
+#define FIFO_CFG4_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG4_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG4_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG4_LE BIT(15) /* Long Event */
#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
@@ -167,28 +168,28 @@
#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
#define FIFO_CFG5_FC BIT(2) /* False Carrier */
#define FIFO_CFG5_CE BIT(3) /* Code Error */
-#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
-#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
-#define FIFO_CFG5_OK BIT(6) /* Packet is OK */
-#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
-#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
-#define FIFO_CFG5_DR BIT(9) /* Dribble */
-#define FIFO_CFG5_CF BIT(10) /* Control Frame */
-#define FIFO_CFG5_PF BIT(11) /* Pause Frame */
-#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
-#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
-#define FIFO_CFG5_LE BIT(14) /* Long Event */
-#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
-#define FIFO_CFG5_16 BIT(16) /* unknown */
-#define FIFO_CFG5_17 BIT(17) /* unknown */
+#define FIFO_CFG5_CR BIT(4) /* CRC error */
+#define FIFO_CFG5_LM BIT(5) /* Length Mismatch */
+#define FIFO_CFG5_LO BIT(6) /* Length Out of Range */
+#define FIFO_CFG5_OK BIT(7) /* Packet is OK */
+#define FIFO_CFG5_MC BIT(8) /* Multicast Packet */
+#define FIFO_CFG5_BC BIT(9) /* Broadcast Packet */
+#define FIFO_CFG5_DR BIT(10) /* Dribble */
+#define FIFO_CFG5_CF BIT(11) /* Control Frame */
+#define FIFO_CFG5_PF BIT(12) /* Pause Frame */
+#define FIFO_CFG5_UO BIT(13) /* Unsupported Opcode */
+#define FIFO_CFG5_VT BIT(14) /* VLAN tag detected */
+#define FIFO_CFG5_LE BIT(15) /* Long Event */
+#define FIFO_CFG5_FT BIT(16) /* Frame Truncated */
+#define FIFO_CFG5_UC BIT(17) /* Unicast Packet */
#define FIFO_CFG5_SF BIT(18) /* Short Frame */
#define FIFO_CFG5_BM BIT(19) /* Byte Mode */
#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
- FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
- FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
- FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
- FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
- FIFO_CFG5_17 | FIFO_CFG5_SF)
+ FIFO_CFG5_CE | FIFO_CFG5_LM | FIFO_CFG5_LO | \
+ FIFO_CFG5_OK | FIFO_CFG5_MC | FIFO_CFG5_BC | \
+ FIFO_CFG5_DR | FIFO_CFG5_CF | FIFO_CFG5_UO | \
+ FIFO_CFG5_VT | FIFO_CFG5_LE | FIFO_CFG5_FT | \
+ FIFO_CFG5_UC | FIFO_CFG5_SF)
#define AG71XX_REG_TX_CTRL 0x0180
#define TX_CTRL_TXE BIT(0) /* Tx Enable */
@@ -378,10 +379,7 @@ struct ag71xx {
u32 fifodata[3];
int mac_idx;
- struct reset_control *mdio_reset;
- struct mii_bus *mii_bus;
struct clk *clk_mdio;
- struct clk *clk_eth;
};
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
@@ -446,13 +444,20 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_mii_ioctl(ag->phylink, ifr, cmd);
+}
+
static void ag71xx_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
@@ -503,8 +508,7 @@ static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, ag71xx_statistics[i].name);
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -684,36 +688,27 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
{
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
+ struct reset_control *mdio_reset;
static struct mii_bus *mii_bus;
struct device_node *np, *mnp;
int err;
np = dev->of_node;
- ag->mii_bus = NULL;
- ag->clk_mdio = devm_clk_get(dev, "mdio");
+ ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
if (IS_ERR(ag->clk_mdio)) {
netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
return PTR_ERR(ag->clk_mdio);
}
- err = clk_prepare_enable(ag->clk_mdio);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
- return err;
- }
-
mii_bus = devm_mdiobus_alloc(dev);
- if (!mii_bus) {
- err = -ENOMEM;
- goto mdio_err_put_clk;
- }
+ if (!mii_bus)
+ return -ENOMEM;
- ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
- if (IS_ERR(ag->mdio_reset)) {
+ mdio_reset = devm_reset_control_get_exclusive(dev, "mdio");
+ if (IS_ERR(mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- err = PTR_ERR(ag->mdio_reset);
- goto mdio_err_put_clk;
+ return PTR_ERR(mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -724,33 +719,18 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
mii_bus->parent = dev;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
- if (!IS_ERR(ag->mdio_reset)) {
- reset_control_assert(ag->mdio_reset);
- msleep(100);
- reset_control_deassert(ag->mdio_reset);
- msleep(200);
- }
+ reset_control_assert(mdio_reset);
+ msleep(100);
+ reset_control_deassert(mdio_reset);
+ msleep(200);
mnp = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(mii_bus, mnp);
+ err = devm_of_mdiobus_register(dev, mii_bus, mnp);
of_node_put(mnp);
if (err)
- goto mdio_err_put_clk;
-
- ag->mii_bus = mii_bus;
+ return err;
return 0;
-
-mdio_err_put_clk:
- clk_disable_unprepare(ag->clk_mdio);
- return err;
-}
-
-static void ag71xx_mdio_remove(struct ag71xx *ag)
-{
- if (ag->mii_bus)
- mdiobus_unregister(ag->mii_bus);
- clk_disable_unprepare(ag->clk_mdio);
}
static void ag71xx_hw_stop(struct ag71xx *ag)
@@ -766,7 +746,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
- timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
if (likely(time_before(jiffies, timestamp + HZ / 10)))
return false;
@@ -786,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +805,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -946,7 +926,7 @@ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
u32 t;
@@ -970,7 +950,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1024,83 +1004,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_MII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
- ag71xx_is(ag, AR9340) ||
- ag71xx_is(ag, QCA9530) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_GMII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_SGMII:
- if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RMII:
- if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RGMII:
- if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- default:
- goto unsupported;
- }
-
- phylink_set(mask, MII);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_GMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-unsupported:
- linkmode_zero(supported);
-}
-
-static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- state->link = 0;
-}
-
-static void ag71xx_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
static void ag71xx_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1163,9 +1066,6 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = ag71xx_mac_validate,
- .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
- .mac_an_restart = ag71xx_mac_an_restart,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
@@ -1177,6 +1077,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag)
ag->phylink_config.dev = &ag->ndev->dev;
ag->phylink_config.type = PHYLINK_NETDEV;
+ ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ ag->phylink_config.supported_interfaces);
phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
ag->phy_if_mode, &ag71xx_phylink_mac_ops);
@@ -1285,6 +1213,11 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
buf->rx.rx_buf = data;
buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, buf->rx.dma_addr)) {
+ skb_free_frag(data);
+ buf->rx.rx_buf = NULL;
+ return false;
+ }
desc->data = (u32)buf->rx.dma_addr + offset;
return true;
}
@@ -1463,7 +1396,7 @@ static void ag71xx_hw_disable(struct ag71xx *ag)
ag71xx_dma_reset(ag);
napi_disable(&ag->napi);
- del_timer_sync(&ag->oom_timer);
+ timer_delete_sync(&ag->oom_timer);
ag71xx_rings_cleanup(ag);
}
@@ -1478,7 +1411,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret) {
netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
ret);
- goto err;
+ return ret;
}
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1499,6 +1432,7 @@ static int ag71xx_open(struct net_device *ndev)
err:
ag71xx_rings_cleanup(ag);
+ phylink_disconnect_phy(ag->phylink);
return ret;
}
@@ -1582,6 +1516,10 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ag->pdev->dev, dma_addr)) {
+ netif_dbg(ag, tx_err, ndev, "DMA mapping error\n");
+ goto err_drop;
+ }
i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
@@ -1634,7 +1572,7 @@ err_drop:
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
- struct ag71xx *ag = from_timer(ag, t, oom_timer);
+ struct ag71xx *ag = timer_container_of(ag, t, oom_timer);
napi_schedule(&ag->napi);
}
@@ -1669,8 +1607,8 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
int ring_mask, ring_size, done = 0;
unsigned int pktlen_mask, offset;
struct ag71xx_ring *ring;
- struct list_head rx_list;
struct sk_buff *skb;
+ LIST_HEAD(rx_list);
ring = &ag->rx_ring;
pktlen_mask = ag->dcfg->desc_pktlen_mask;
@@ -1681,13 +1619,10 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
limit, ring->curr, ring->dirty);
- INIT_LIST_HEAD(&rx_list);
-
while (done < limit) {
unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
- int err = 0;
if (ag71xx_desc_empty(desc))
break;
@@ -1708,8 +1643,9 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
+ ndev->stats.rx_errors++;
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
}
@@ -1717,14 +1653,10 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb_reserve(skb, offset);
skb_put(skb, pktlen);
- if (err) {
- ndev->stats.rx_dropped++;
- kfree_skb(skb);
- } else {
- skb->dev = ndev;
- skb->ip_summed = CHECKSUM_NONE;
- list_add_tail(&skb->list, &rx_list);
- }
+ skb->dev = ndev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->protocol = eth_type_trans(skb, ndev);
+ list_add_tail(&skb->list, &rx_list);
next:
ring->buf[i].rx.rx_buf = NULL;
@@ -1735,8 +1667,6 @@ next:
ag71xx_ring_rx_refill(ag);
- list_for_each_entry(skb, &rx_list, list)
- skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb_list(&rx_list);
netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
@@ -1754,7 +1684,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
@@ -1838,7 +1768,7 @@ static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ag71xx *ag = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ndev->mtu));
@@ -1849,7 +1779,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = ag71xx_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1866,6 +1796,7 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
+ struct clk *clk_eth;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1896,11 +1827,10 @@ static int ag71xx_probe(struct platform_device *pdev)
return -EINVAL;
}
- ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
- if (IS_ERR(ag->clk_eth)) {
- netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(ag->clk_eth);
- }
+ clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
+ if (IS_ERR(clk_eth))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_eth),
+ "Failed to get eth clk.");
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1911,17 +1841,19 @@ static int ag71xx_probe(struct platform_device *pdev)
memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
- if (IS_ERR(ag->mac_reset)) {
- netif_err(ag, probe, ndev, "missing mac reset\n");
- err = PTR_ERR(ag->mac_reset);
- goto err_free;
- }
+ if (IS_ERR(ag->mac_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ag->mac_reset),
+ "missing mac reset");
- ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base) {
- err = -ENOMEM;
- goto err_free;
- }
+ ag->mac_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ag->mac_base))
+ return PTR_ERR(ag->mac_base);
+
+ /* ensure that HW is in manual polling mode before interrupts are
+ * activated. Otherwise ag71xx_interrupt might call napi_schedule
+ * before it is initialized by netif_napi_add.
+ */
+ ag71xx_int_disable(ag, AG71XX_INT_POLL);
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1861,7 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
- goto err_free;
+ return err;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,16 +1889,16 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->stop_desc)
+ return -ENOMEM;
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
err = of_get_ethdev_address(np, ndev);
+ if (err == -EPROBE_DEFER)
+ return err;
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_hw_addr_random(ndev);
@@ -1975,16 +1907,11 @@ static int ag71xx_probe(struct platform_device *pdev)
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- goto err_free;
+ return err;
}
- netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
-
- err = clk_prepare_enable(ag->clk_eth);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- goto err_free;
- }
+ netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
+ AG71XX_NAPI_WEIGHT);
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -1992,21 +1919,17 @@ static int ag71xx_probe(struct platform_device *pdev)
err = ag71xx_mdio_probe(ag);
if (err)
- goto err_put_clk;
-
- platform_set_drvdata(pdev, ndev);
+ return err;
err = ag71xx_phylink_setup(ag);
- if (err) {
- netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- goto err_mdio_remove;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to setup phylink");
- err = register_netdev(ndev);
+ err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
- platform_set_drvdata(pdev, NULL);
- goto err_mdio_remove;
+ return err;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
@@ -2014,31 +1937,6 @@ static int ag71xx_probe(struct platform_device *pdev)
phy_modes(ag->phy_if_mode));
return 0;
-
-err_mdio_remove:
- ag71xx_mdio_remove(ag);
-err_put_clk:
- clk_disable_unprepare(ag->clk_eth);
-err_free:
- free_netdev(ndev);
- return err;
-}
-
-static int ag71xx_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ag71xx *ag;
-
- if (!ndev)
- return 0;
-
- ag = netdev_priv(ndev);
- unregister_netdev(ndev);
- ag71xx_mdio_remove(ag);
- clk_disable_unprepare(ag->clk_eth);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2122,10 +2020,10 @@ static const struct of_device_id ag71xx_match[] = {
{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
{}
};
+MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
@@ -2133,4 +2031,5 @@ static struct platform_driver ag71xx_driver = {
};
module_platform_driver(ag71xx_driver);
+MODULE_DESCRIPTION("Atheros AR71xx built-in ethernet mac driver");
MODULE_LICENSE("GPL v2");
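The ag71xx rewrite shows how far managed resources can go: with
devm_clk_get_enabled(), devm_reset_control_get_exclusive(),
devm_of_mdiobus_register() and devm_register_netdev(), every probe
error path collapses to a plain return, and both ag71xx_remove() and
ag71xx_mdio_remove() disappear because the core unwinds everything in
reverse order on failure or removal. Condensed sketch of the
resulting shape (illustrative names):

	static int my_setup(struct platform_device *pdev,
			    struct net_device *ndev)
	{
		struct clk *clk;

		clk = devm_clk_get_enabled(&pdev->dev, "eth");
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "Failed to get eth clk.");

		/* undone automatically when probe fails later
		 * or when the device goes away */
		return devm_register_netdev(&pdev->dev, ndev);
	}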
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index b716adacd815..7f6b69a52367 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -292,9 +292,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev,
spin_lock(&alx->stats_lock);
alx_update_hw_stats(hw);
- BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
- ALX_NUM_STATS * sizeof(u64));
- memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+ BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats, sizeof(hw->stats));
spin_unlock(&alx->stats_lock);
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ad3fc72e74e..ad6d6abd885f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -39,7 +39,6 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -752,7 +751,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -902,7 +901,7 @@ static int alx_init_intr(struct alx_priv *alx)
int ret;
ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret < 0)
return ret;
@@ -1177,12 +1176,15 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
+ mutex_unlock(&alx->mtx);
+ }
return 0;
}
@@ -1742,7 +1744,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_pci_disable;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
if (!pdev->pm_cap) {
@@ -1876,7 +1877,6 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1894,7 +1894,6 @@ static void alx_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
mutex_destroy(&alx->mtx);
@@ -1902,18 +1901,20 @@ static void alx_remove(struct pci_dev *pdev)
free_netdev(alx->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1924,6 +1925,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1940,15 +1942,11 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
-static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
-#define ALX_PM_OPS (&alx_pm_ops)
-#else
-#define ALX_PM_OPS NULL
-#endif
-
+static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -2047,7 +2045,7 @@ static struct pci_driver alx_driver = {
.probe = alx_probe,
.remove = alx_remove,
.err_handler = &alx_err_handlers,
- .driver.pm = ALX_PM_OPS,
+ .driver.pm = pm_sleep_ptr(&alx_pm_ops),
};
module_pci_driver(alx_driver);
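The alx PM hunk replaces the #ifdef CONFIG_PM_SLEEP / ALX_PM_OPS dance
with the modern idiom: DEFINE_SIMPLE_DEV_PM_OPS() defines the ops
unconditionally, and pm_sleep_ptr() turns the reference into NULL when
sleep support is compiled out, letting the compiler discard the
callbacks without preprocessor guards:

	static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct pci_driver my_driver = {
		.name      = "my_nic",
		.probe     = my_probe,
		.remove    = my_remove,
		.driver.pm = pm_sleep_ptr(&my_pm_ops),
	};

Note also that alx_suspend()/alx_resume() now take the RTNL lock
around detach and attach, closing a race with concurrent netdev
operations.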
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 43d821fe7a54..63ba64dbb731 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
u16 next_to_use;
u16 next_to_clean;
struct napi_struct napi;
- struct page *rx_page;
- unsigned int rx_page_offset;
};
/* board specific private data structure */
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
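As in the arc and asix hunks earlier, strlcpy() and strncpy() calls
into fixed-size ethtool strings become strscpy(). strscpy() always
NUL-terminates (strncpy() does not), never has to walk the source past
the destination size the way strlcpy()'s strlen-style return value
does, and reports truncation as -E2BIG:

	if (strscpy(drvinfo->driver, atl1c_driver_name,
		    sizeof(drvinfo->driver)) < 0)
		;	/* truncated: name longer than the field */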
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..7efa3fc257b3 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -207,16 +207,6 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/**
- * atl1c_irq_reset - reset interrupt confiure on the NIC
- * @adapter: board private structure
- */
-static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
-{
- atomic_set(&adapter->irq_sem, 1);
- atl1c_irq_enable(adapter);
-}
-
/*
* atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
* of the idle status register until the device is actually idle
@@ -241,8 +231,8 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
*/
static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1c_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -367,7 +357,7 @@ static void atl1c_common_task(struct work_struct *work)
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
@@ -493,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
- unsigned int head_size;
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
-
- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -576,7 +561,7 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
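netdev->mtu can be read locklessly from the datapath, so the RTNL-protected writer is annotated with WRITE_ONCE() to pair with READ_ONCE() on the reader side; a minimal sketch of the pairing:

	/* writer, under RTNL */
	WRITE_ONCE(netdev->mtu, new_mtu);

	/* lockless reader elsewhere */
	unsigned int mtu = READ_ONCE(netdev->mtu);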
@@ -857,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
- struct atl1c_buffer *buffer_info)
+ struct atl1c_buffer *buffer_info,
+ int budget)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -876,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->length, pci_driection);
}
if (buffer_info->skb)
- dev_consume_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
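napi_consume_skb() uses the added budget argument to pick the free path: a non-zero budget (NAPI poll context) feeds the per-CPU skb bulk-free cache, while budget == 0 falls back to dev_consume_skb_any(), which is safe in any context. That is why the teardown and rollback callers in this patch pass 0 and only atl1c_clean_tx() passes its poll budget:

	/* NAPI poll context: bulk-free cache */
	atl1c_clean_buffer(pdev, buffer_info, budget);

	/* teardown/rollback context: behaves like dev_consume_skb_any() */
	atl1c_clean_buffer(pdev, buffer_info, 0);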
@@ -897,10 +883,10 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
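For context: netdev_reset_queue(dev) is shorthand for clearing the byte-queue-limit (BQL) state of queue 0 only, so a multi-queue driver has to reset each TX queue it tears down, which is what the hunk above switches to:

	/* the old one-queue shorthand, equivalent to queue 0 only */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));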
@@ -924,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -974,7 +960,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int i;
dma_free_coherent(&pdev->dev, adapter->ring_header.size,
adapter->ring_header.desc, adapter->ring_header.dma);
@@ -987,12 +972,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
- for (i = 0; i < adapter->rx_queue_count; ++i) {
- if (adapter->rrd_ring[i].rx_page) {
- put_page(adapter->rrd_ring[i].rx_page);
- adapter->rrd_ring[i].rx_page = NULL;
- }
- }
}
/**
@@ -1051,7 +1030,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
@@ -1629,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
total_bytes += buffer_info->skb->len;
total_packets++;
}
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, budget);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1764,48 +1743,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
- u32 queue, bool napi_mode)
-{
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
- struct sk_buff *skb;
- struct page *page;
-
- if (adapter->rx_frag_size > PAGE_SIZE) {
- if (likely(napi_mode))
- return napi_alloc_skb(&rrd_ring->napi,
- adapter->rx_buffer_len);
- else
- return netdev_alloc_skb_ip_align(adapter->netdev,
- adapter->rx_buffer_len);
- }
-
- page = rrd_ring->rx_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page))
- return NULL;
- rrd_ring->rx_page = page;
- rrd_ring->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
- adapter->rx_frag_size);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- rrd_ring->rx_page_offset += adapter->rx_frag_size;
- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
- rrd_ring->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
bool napi_mode)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
@@ -1824,13 +1766,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
+ /* When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to cause a DMA
+ * RFD overflow.
+ *
+ * To work around this, allocate the RX skb with 64
+ * bytes of extra space and offset the address
+ * whenever 0x....fc0 is detected.
+ */
+ if (likely(napi_mode))
+ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
+ else
+ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
break;
}
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
/*
* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
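Restating the workaround math above: a data pointer whose low 12 bits equal 0xfc0 sits exactly 64 bytes below a 4 KiB boundary, which is the case the DMA engine mishandles, and the 64 extra bytes allocated above guarantee that skb_reserve(skb, 64) never overruns the buffer. A small standalone sketch of the same condition:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* true when addr lies exactly 64 bytes below a 4 KiB boundary */
	static bool rfd_overflow_prone(uintptr_t addr)
	{
		return (addr & 0xfff) == 0xfc0;
	}

	int main(void)
	{
		printf("%d %d\n", rfd_overflow_prone(0x1000fc0),  /* 1 */
		       rfd_overflow_prone(0x1000f80));            /* 0 */
		return 0;
	}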
@@ -2072,7 +2028,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
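skb_tcp_all_headers() replaces the open-coded sum used throughout these drivers; its definition in include/linux/tcp.h is exactly the expression the hunks drop:

	static inline int skb_tcp_all_headers(const struct sk_buff *skb)
	{
		return skb_transport_offset(skb) + tcp_hdrlen(skb);
	}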
@@ -2104,10 +2060,13 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2091,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2193,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(adpt->pdev, buffer_info);
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
@@ -2219,7 +2178,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2728,13 +2688,13 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
- dev_set_threaded(netdev, true);
+ netif_threaded_enable(netdev);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
- netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
- atl1c_clean_tx, 64);
+ netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2849,7 +2809,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 56e5f440e666..40290028580b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -115,8 +115,8 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
*/
static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1e_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -232,7 +232,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
}
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
@@ -428,7 +428,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
atl1e_down(adapter);
@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
- goto failed;
+ goto free_buffer;
}
return 0;
+free_buffer:
+ kfree(tx_ring->tx_buffer);
+ tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
dma_free_coherent(&pdev->dev, adapter->ring_size,
@@ -1609,8 +1612,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1642,10 +1644,13 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1718,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2354,7 +2360,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
@@ -2395,7 +2401,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
- netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
@@ -2482,7 +2488,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b4c9e805e981..98a4d089270e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
break;
}
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ kfree_skb(skb);
+ adapter->soft_stats.rx_dropped++;
+ break;
+ }
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16)adapter->rx_buffer_len;
+
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -2113,9 +2120,12 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
real_len = (((unsigned char *)iph - skb->data) +
ntohs(iph->tot_len));
- if (real_len < skb->len)
- pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ if (real_len < skb->len) {
+ err = pskb_trim(skb, real_len);
+ if (err)
+ return err;
+ }
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2180,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return 0;
}
-static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct tx_packet_desc *ptpd)
+static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tx_packet_desc *ptpd)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
@@ -2191,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags;
unsigned int f;
int retval;
+ u16 first_mapped;
u16 next_to_use;
u16 data_len;
u8 hdr_len;
@@ -2198,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buf_len -= skb->data_len;
nr_frags = skb_shinfo(skb)->nr_frags;
next_to_use = atomic_read(&tpd_ring->next_to_use);
+ first_mapped = next_to_use;
buffer_info = &tpd_ring->buffer_info[next_to_use];
BUG_ON(buffer_info->skb);
/* put skb in last TPD */
@@ -2206,13 +2218,15 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2239,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
page, offset,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2251,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, buf_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2274,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, i * ATL1_MAX_TX_BUF_LEN,
buffer_info->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ buffer_info->dma))
+ goto dma_err;
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2282,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
/* last tpd's buffer-info */
buffer_info->skb = skb;
+
+ return true;
+
+ dma_err:
+ while (first_mapped != next_to_use) {
+ buffer_info = &tpd_ring->buffer_info[first_mapped];
+ dma_unmap_page(&adapter->pdev->dev,
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ buffer_info->dma = 0;
+
+ if (++first_mapped == tpd_ring->count)
+ first_mapped = 0;
+ }
+ return false;
}
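The dma_err path above is the standard partial-unmap pattern: record the first descriptor index before any mapping, and on a dma_mapping_error() hit walk from that index up to next_to_use, undoing only what was actually mapped. A minimal sketch with hypothetical ring/ring_count names:

	u16 i = first_mapped;			/* saved before the first map */

	while (i != next_to_use) {		/* unwind only mapped slots */
		dma_unmap_page(dev, ring[i].dma, ring[i].length,
			       DMA_TO_DEVICE);
		ring[i].dma = 0;
		if (++i == ring_count)		/* handle ring wrap */
			i = 0;
	}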
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
@@ -2352,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
len = skb_headlen(skb);
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (unlikely(skb->len <= 0))
+ goto drop_packet;
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
@@ -2367,12 +2403,10 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
- if (unlikely(proto_hdr_len > len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ proto_hdr_len = skb_tcp_all_headers(skb);
+ if (unlikely(proto_hdr_len > len))
+ goto drop_packet;
+
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
@@ -2404,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
}
tso = atl1_tso(adapter, skb, ptpd);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (tso < 0)
+ goto drop_packet;
if (!tso) {
ret_val = atl1_tx_csum(adapter, skb, ptpd);
- if (ret_val < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ if (ret_val < 0)
+ goto drop_packet;
}
- atl1_tx_map(adapter, skb, ptpd);
+ if (!atl1_tx_map(adapter, skb, ptpd))
+ goto drop_packet;
+
atl1_tx_queue(adapter, count, ptpd);
atl1_update_mailbox(adapter);
return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->soft_stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static int atl1_rings_clean(struct napi_struct *napi, int budget)
@@ -2444,7 +2481,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget)
static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
{
- if (!napi_schedule_prep(&adapter->napi))
+ if (!napi_schedule(&adapter->napi))
/* It is possible in case even the RX/TX ints are disabled via IMR
* register the ISR bits are set anyway (but do not produce IRQ).
* To handle such situation the napi functions used to check is
@@ -2452,8 +2489,6 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
*/
return 0;
- __napi_schedule(&adapter->napi);
-
/*
* Disable RX/TX ints via IMR register if it is
* allowed. NAPI handler must reenable them in same
@@ -2556,8 +2591,8 @@ static irqreturn_t atl1_intr(int irq, void *data)
*/
static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2641,7 +2676,7 @@ static void atl1_down(struct atl1_adapter *adapter)
napi_disable(&adapter->napi);
netif_stop_queue(netdev);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
adapter->phy_timer_pending = false;
atlx_irq_disable(adapter);
@@ -2687,7 +2722,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
@@ -2978,7 +3013,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3341,8 +3376,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -3438,7 +3473,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
}
static void atl1_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
@@ -3451,7 +3488,9 @@ static void atl1_get_ringparam(struct net_device *netdev,
}
static int atl1_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..280e2f5f4aa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -752,8 +752,8 @@ static void atl2_down(struct atl2_adapter *adapter)
atl2_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
clear_bit(0, &adapter->cfg_phy);
netif_carrier_off(netdev);
@@ -905,7 +905,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_hw *hw = &adapter->hw;
/* set MTU */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
@@ -1010,7 +1010,8 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
*/
static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1035,8 +1036,8 @@ static void atl2_watchdog(struct timer_list *t)
*/
static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1377,7 +1378,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
netdev->min_mtu = 40;
netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -1468,8 +1469,8 @@ static void atl2_remove(struct pci_dev *pdev)
* explicitly disable watchdog tasks from being rescheduled */
set_bit(__ATL2_DOWN, &adapter->flags);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->phy_config_timer);
+ timer_delete_sync(&adapter->watchdog_timer);
+ timer_delete_sync(&adapter->phy_config_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
@@ -1980,9 +1981,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 56e0fb07aec7..666522d64775 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -25,6 +25,7 @@ config B44
select SSB
select MII
select PHYLIB
+ select FIXED_PHY if BCM47XX
help
If you have a network (Ethernet) controller of this type, say Y
or M here.
@@ -53,8 +54,8 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
- depends on ARCH_BCM4908 || COMPILE_TEST
- default y if ARCH_BCM4908
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ default y if ARCH_BCMBCA
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
@@ -71,7 +72,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on HAS_IOMEM
- select MII
+ depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
select PHYLIB
select FIXED_PHY
select BCM7XXX_PHY
@@ -96,7 +97,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
@@ -123,6 +123,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -143,7 +144,7 @@ config BNX2X
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
select ZLIB_INFLATE
- select LIBCRC32C
+ select CRC32
select MDIO
help
This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
@@ -194,7 +195,6 @@ config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
- select MII
select PHYLIB
select FIXED_PHY
select DIMLIB
@@ -208,10 +208,11 @@ config BNXT
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select FW_LOADER
- select LIBCRC32C
+ select CRC32
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
+ select AUXILIARY_BUS
help
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
@@ -253,4 +254,25 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BNGE
+ tristate "Broadcom Ethernet device support"
+ depends on PCI
+ select NET_DEVLINK
+ select PAGE_POOL
+ help
+ This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
+ The module will be called bng_en. To compile this driver as a module,
+ choose M here.
+
+config BCMASP
+ tristate "Broadcom ASP 2.0 Ethernet support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ depends on OF
+ select PHYLIB
+ select MDIO_BCM_UNIMAC
+ help
+ This configuration enables the Broadcom ASP 2.0 Ethernet controller
+ driver which is present in Broadcom STB SoCs such as 72165.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..10cc1c92ecfc 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,5 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BNGE) += bnge/
diff --git a/drivers/net/ethernet/broadcom/asp2/Makefile b/drivers/net/ethernet/broadcom/asp2/Makefile
new file mode 100644
index 000000000000..e07550315f83
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMASP) += bcm-asp.o
+bcm-asp-objs := bcmasp.o bcmasp_intf.o bcmasp_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
new file mode 100644
index 000000000000..fd35f4b4dc50
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -0,0 +1,1469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB ASP 2.0 Driver
+ *
+ * Copyright (c) 2023 Broadcom
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
+ priv->irq_mask &= ~mask;
+}
+
+static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
+ priv->irq_mask |= mask;
+}
+
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
+
+static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
+{
+ _intr2_mask_set(priv, 0xffffffff);
+ priv->irq_mask = 0xffffffff;
+}
+
+static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
+{
+ intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
+{
+ if (status & ASP_INTR2_RX_ECH(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->rx_napi))) {
+ bcmasp_enable_rx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->rx_napi);
+ }
+ }
+
+ if (status & ASP_INTR2_TX_DESC(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->tx_napi))) {
+ bcmasp_enable_tx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->tx_napi);
+ }
+ }
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
+}
+
+static irqreturn_t bcmasp_isr(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ struct bcmasp_intf *intf;
+ u32 status;
+
+ status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
+ ~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);
+
+ intr2_core_wl(priv, status, ASP_INTR2_CLEAR);
+
+ if (unlikely(status == 0)) {
+ dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle interfaces */
+ list_for_each_entry(intf, &priv->intfs, list)
+ bcmasp_intr2_handling(intf, status);
+
+ return IRQ_HANDLED;
+}
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 mask;
+
+ switch (intf->port) {
+ case 0:
+ mask = ASP_CTRL_UMAC0_FLUSH_MASK;
+ break;
+ case 1:
+ mask = ASP_CTRL_UMAC1_FLUSH_MASK;
+ break;
+ case 2:
+ mask = ASP_CTRL_SPB_FLUSH_MASK;
+ break;
+ default:
+ /* Not valid port */
+ return;
+ }
+
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
+}
+
+static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
+ ASP_RX_FILTER_NET_OFFSET_L4(32),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
+}
+
+#define MAX_WAKE_FILTER_SIZE 256
+enum asp_netfilt_reg_type {
+ ASP_NETFILT_MATCH = 0,
+ ASP_NETFILT_MASK,
+ ASP_NETFILT_MAX
+};
+
+static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ u32 block_index, filter_sel;
+
+ if (offset < 32) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 64) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 96) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 128) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 160) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 192) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 224) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 256) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index + 1;
+ } else {
+ return -EINVAL;
+ }
+
+ switch (reg_type) {
+ case ASP_NETFILT_MATCH:
+ return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
+ (offset % 32));
+ case ASP_NETFILT_MASK:
+ return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
+ (offset % 32));
+ default:
+ return -EINVAL;
+ }
+}
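A worked example of the mapping above: offset 100 satisfies 96 <= 100 < 128, so it selects the ASP_RX_FILTER_NET_L3_0 block of the odd filter of the pair (hw_index + 1), and the word within that block is addressed with offset % 32 == 4.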
+
+static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 val, u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4 byte aligned writes */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ rx_filter_core_wl(priv, val, reg_offset);
+}
+
+static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4 byte aligned writes */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return 0;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ return rx_filter_core_rl(priv, reg_offset);
+}
+
+static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ u32 offset, void *match, void *mask,
+ size_t size)
+{
+ u32 shift, mask_val = 0, match_val = 0;
+ bool first_byte = true;
+
+ if ((offset + size) > MAX_WAKE_FILTER_SIZE)
+ return -EINVAL;
+
+ while (size--) {
+ /* The HW only accepts 4 byte aligned writes, so if we
+ * begin unaligned or fewer than 4 bytes remain, we
+ * need to read then write to avoid losing the current
+ * register state.
+ */
+ if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
+ match_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MATCH,
+ ALIGN_DOWN(offset, 4));
+ mask_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MASK,
+ ALIGN_DOWN(offset, 4));
+ }
+
+ shift = (3 - (offset % 4)) * 8;
+ match_val &= ~GENMASK(shift + 7, shift);
+ mask_val &= ~GENMASK(shift + 7, shift);
+ match_val |= (u32)(*((u8 *)match) << shift);
+ mask_val |= (u32)(*((u8 *)mask) << shift);
+
+ /* If last byte or last byte of word, write to reg */
+ if (!size || ((offset % 4) == 3)) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
+ match_val, ALIGN_DOWN(offset, 4));
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
+ mask_val, ALIGN_DOWN(offset, 4));
+ first_byte = true;
+ } else {
+ first_byte = false;
+ }
+
+ offset++;
+ match++;
+ mask++;
+ }
+
+ return 0;
+}
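The shift arithmetic packs bytes most-significant-first into each 32-bit filter register: offset % 4 == 0 lands in bits 31:24 and offset % 4 == 3 in bits 7:0. A small standalone demonstration of the byte-lane math:

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int off = 0; off < 4; off++) {
			unsigned int shift = (3 - (off % 4)) * 8;
			printf("offset %% 4 == %u -> bits [%u:%u]\n",
			       off, shift + 7, shift);
		}
		return 0;
	}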
+
+static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ int i;
+
+ for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
+ }
+}
+
+static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip4_spec *match,
+ struct ethtool_tcpip4_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &match->tos, &mask->tos,
+ sizeof(match->tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &match->ip4src, &mask->ip4src,
+ sizeof(match->ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &match->ip4dst, &mask->ip4dst,
+ sizeof(match->ip4dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
+
+static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip6_spec *match,
+ struct ethtool_tcpip6_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
+ val_16 = htons(ETH_P_IPV6);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ val_16 = htons(match->tclass << 4);
+ mask_16 = htons(mask->tclass << 4);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
+ &match->ip6src, &mask->ip6src,
+ sizeof(match->ip6src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
+ &match->ip6dst, &mask->ip6dst,
+ sizeof(match->ip6dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
+
+static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct ethtool_rx_flow_spec *fs = &nfilt->fs;
+ unsigned int offset = 0;
+ __be16 val_16, mask_16;
+ u8 val_8, mask_8;
+
+ /* Currently only supports wake filters */
+ if (!nfilt->wake_filter)
+ return -EINVAL;
+
+ bcmasp_netfilt_reset_hw(priv, nfilt);
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
+ &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+
+ break;
+ case IP_USER_FLOW:
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only supports 20 byte IPv4 header */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_8, &mask_8, sizeof(val_8));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt,
+ ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes));
+ break;
+ case TCP_V4_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V4_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
+ &fs->m_u.udp_ip4_spec, offset);
+
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case TCP_V6_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V6_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
+ &fs->m_u.udp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ }
+
+ bcmasp_netfilt_hw_en_wake(priv, nfilt);
+
+ return 0;
+}
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ bool write = false;
+ int ret, i;
+
+ /* Write all filters to HW */
+ for (i = 0; i < priv->num_net_filters; i++) {
+ /* If the filter does not match the port, skip programming. */
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
+ if (!ret)
+ write = true;
+ }
+
+ /* Successfully programmed at least one wake filter
+ * so enable top level wake config
+ */
+ if (write)
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_LNR_MD |
+ ASP_RX_FILTER_GEN_WK_EN |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int j = 0, i;
+
+ for (i = 0; i < priv->num_net_filters; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[j++] = priv->net_filters[i].fs.location;
+ }
+
+ *rule_cnt = j;
+
+ return 0;
+}
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int cnt = 0, i;
+
+ for (i = 0; i < priv->num_net_filters; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ /* Skip over a wake filter pair */
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ cnt++;
+ }
+
+ return cnt;
+}
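The (i % 2) test in these loops skips the odd half of a claimed wake-filter pair: a wake filter always occupies an even slot plus the slot after it, so visiting only the even half keeps each pair from being counted twice. A runnable toy model of the same skip logic:

	#include <stdbool.h>
	#include <stdio.h>

	struct filt { bool claimed, wake; };

	int main(void)
	{
		/* hypothetical table: slots 2 and 3 form one wake pair */
		struct filt f[4] = { { true, false }, { false, false },
				     { true, true },  { true, true } };
		int cnt = 0;

		for (int i = 0; i < 4; i++) {
			if (!f[i].claimed)
				continue;
			if (i > 0 && (i % 2) && f[i].wake && f[i - 1].wake)
				continue;	/* odd half of a pair */
			cnt++;
		}
		printf("%d\n", cnt);	/* prints 2: slot 0 plus the pair */
		return 0;
	}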
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct ethtool_rx_flow_spec *cur;
+ size_t fs_size = 0;
+ int i;
+
+ for (i = 0; i < priv->num_net_filters; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ cur = &priv->net_filters[i].fs;
+
+ if (cur->flow_type != fs->flow_type ||
+ cur->ring_cookie != fs->ring_cookie)
+ continue;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ fs_size = sizeof(struct ethhdr);
+ break;
+ case IP_USER_FLOW:
+ fs_size = sizeof(struct ethtool_usrip4_spec);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
+ memcmp(&cur->m_u, &fs->m_u, fs_size))
+ continue;
+
+ if (cur->flow_type & FLOW_EXT) {
+ if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
+ cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
+ cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
+ cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
+ cur->h_ext.data[0] != fs->h_ext.data[0])
+ continue;
+ }
+ if (cur->flow_type & FLOW_MAC_EXT) {
+ if (memcmp(&cur->h_ext.h_dest,
+ &fs->h_ext.h_dest, ETH_ALEN) ||
+ memcmp(&cur->m_ext.h_dest,
+ &fs->m_ext.h_dest, ETH_ALEN))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/* If no matching network filter is found, return an open filter.
+ * If there are no more open filters, return an error pointer.
+ */
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ u32 loc, bool wake_filter,
+ bool init)
+{
+ struct bcmasp_net_filter *nfilter = NULL;
+ struct bcmasp_priv *priv = intf->parent;
+ int i, open_index = -1;
+
+ /* Check whether we exceed the filter table capacity */
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
+ return ERR_PTR(-EINVAL);
+
+ /* If the filter location is busy (already claimed) and we are initializing
+ * the filter (insertion), return a busy error code.
+ */
+ if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
+ return ERR_PTR(-EBUSY);
+
+ /* We need two filters for wake-up, so we cannot use an odd filter */
+ if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
+ return ERR_PTR(-EINVAL);
+
+ /* Initialize the loop index based on the desired location or from 0 */
+ i = loc == RX_CLS_LOC_ANY ? 0 : loc;
+
+ for ( ; i < priv->num_net_filters; i++) {
+ /* Found matching network filter */
+ if (!init &&
+ priv->net_filters[i].claimed &&
+ priv->net_filters[i].hw_index == i &&
+ priv->net_filters[i].port == intf->port)
+ return &priv->net_filters[i];
+
+ /* Skip if we don't need a new filter or one was already found */
+ if (!init || open_index >= 0)
+ continue;
+
+ /* A wake filter consolidates two filters to cover more bytes.
+ * A wake filter slot is open if...
+ * 1. It is an even filter
+ * 2. Neither the current nor the next filter is claimed
+ */
+ if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
+ !priv->net_filters[i + 1].claimed)
+ open_index = i;
+ else if (!priv->net_filters[i].claimed)
+ open_index = i;
+ }
+
+ if (open_index >= 0) {
+ nfilter = &priv->net_filters[open_index];
+ nfilter->claimed = true;
+ nfilter->port = intf->port;
+ nfilter->hw_index = open_index;
+ }
+
+ if (wake_filter && open_index >= 0) {
+ /* Claim next filter */
+ priv->net_filters[open_index + 1].claimed = true;
+ priv->net_filters[open_index + 1].wake_filter = true;
+ nfilter->wake_filter = true;
+ }
+
+ return nfilter ? nfilter : ERR_PTR(-EINVAL);
+}
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (nfilt->wake_filter) {
+ memset(&priv->net_filters[nfilt->hw_index + 1], 0,
+ sizeof(struct bcmasp_net_filter));
+ }
+
+ memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
+}
+
+static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
+{
+ *high = (u32)(addr[0] << 8 | addr[1]);
+ *low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
+ addr[5]);
+}
+
+static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
+ const unsigned char *addr,
+ unsigned char *mask,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 addr_h, addr_l, mask_h, mask_l;
+
+ /* Set local copy */
+ ether_addr_copy(priv->mda_filters[i].mask, mask);
+ ether_addr_copy(priv->mda_filters[i].addr, addr);
+
+ /* Write to HW */
+ bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
+ bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
+ rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
+ rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
+ rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
+ rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
+}
+
+static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (priv->mda_filters[i].en == en)
+ return;
+
+ priv->mda_filters[i].en = en;
+ priv->mda_filters[i].port = intf->port;
+
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
+ (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
+ ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
+ ASP_RX_FILTER_MDA_CFG(i));
+}
+
+/* There are 32 MDA filters shared between all ports; we reserve 4 filters per
+ * port for the following:
+ * - Promisc: Filter to allow all packets when promisc is enabled
+ * - All Multicast
+ * - Broadcast
+ * - Own address
+ *
+ * The reserved filters are identified as follows:
+ * - Promisc: (index * 4) + 0
+ * - All Multicast: (index * 4) + 1
+ * - Broadcast: (index * 4) + 2
+ * - Own address: (index * 4) + 3
+ */
+enum asp_rx_filter_id {
+ ASP_RX_FILTER_MDA_PROMISC = 0,
+ ASP_RX_FILTER_MDA_ALLMULTI,
+ ASP_RX_FILTER_MDA_BROADCAST,
+ ASP_RX_FILTER_MDA_OWN_ADDR,
+ ASP_RX_FILTER_MDA_RES_MAX,
+};
+
+#define ASP_RX_FILT_MDA(intf, name) (((intf)->index * \
+ ASP_RX_FILTER_MDA_RES_MAX) \
+ + ASP_RX_FILTER_MDA_##name)
+
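Worked example of the reserved-index math: for the interface with index 1, ASP_RX_FILT_MDA(intf, PROMISC) resolves to 1 * 4 + 0 == 4 and ASP_RX_FILT_MDA(intf, OWN_ADDR) to 1 * 4 + 3 == 7, so each interface's four reserved filters form one contiguous block at the bottom of the 32-entry table.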
+static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
+{
+ return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
+}
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
+ unsigned char promisc[ETH_ALEN];
+
+ eth_zero_addr(promisc);
+ /* Set mask to 00:00:00:00:00:00 to match all packets */
+ bcmasp_set_mda_filter(intf, promisc, promisc, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
+{
+ unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);
+
+ /* Set mask to 01:00:00:00:00:00 to match all multicast */
+ bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
+ unsigned char addr[ETH_ALEN];
+
+ eth_broadcast_addr(addr);
+ bcmasp_set_mda_filter(intf, addr, addr, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned int i;
+ int res_count;
+
+ res_count = bcmasp_total_res_mda_cnt(intf->parent);
+
+ /* Disable all filters held by this port */
+ for (i = res_count; i < priv->num_mda_filters; i++) {
+ if (priv->mda_filters[i].en &&
+ priv->mda_filters[i].port == intf->port)
+ bcmasp_en_mda_filter(intf, 0, i);
+ }
+}
+
+static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
+ unsigned char *addr, unsigned char *mask,
+ int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u64 addr1, addr2, mask1, mask2, mask3;
+
+ /* Switch to u64 to help with the calculations */
+ addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
+ mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
+ addr2 = ether_addr_to_u64(addr);
+ mask2 = ether_addr_to_u64(mask);
+
+ /* Check if one filter resides within the other */
+ mask3 = mask1 & mask2;
+ if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
+ /* Filter 2 resides within filter 1, so everything is good */
+ return 0;
+ } else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
+ /* Filter 1 resides within filter 2, so swap filters */
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ return 0;
+ }
+
+ /* Unable to combine */
+ return -EINVAL;
+}
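The subset test above operates on care-bit masks: mask3 == mask1 means every bit filter 1 matches on is also matched on by filter 2, and if the two addresses agree on those bits, filter 2 accepts a strict subset of filter 1's traffic. A runnable sketch with hypothetical values, an all-multicast filter absorbing one exact multicast MAC:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* filter 1: any multicast (one care bit);
		 * filter 2: one exact multicast MAC (all bits cared) */
		uint64_t addr1 = 0x010000000000ULL, mask1 = 0x010000000000ULL;
		uint64_t addr2 = 0x01005e000001ULL, mask2 = 0xffffffffffffULL;
		uint64_t mask3 = mask1 & mask2;

		if (mask3 == mask1 && (addr1 & mask1) == (addr2 & mask1))
			puts("filter 2 resides within filter 1");
		return 0;
	}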
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int ret, res_count;
+ unsigned int i;
+
+ res_count = bcmasp_total_res_mda_cnt(intf->parent);
+
+ for (i = res_count; i < priv->num_mda_filters; i++) {
+ /* If filter not enabled or belongs to another port skip */
+ if (!priv->mda_filters[i].en ||
+ priv->mda_filters[i].port != intf->port)
+ continue;
+
+ /* Attempt to combine filters */
+ ret = bcmasp_combine_set_filter(intf, addr, mask, i);
+ if (!ret) {
+ intf->mib.filters_combine_cnt++;
+ return 0;
+ }
+ }
+
+ /* Create new filter if possible */
+ for (i = res_count; i < priv->num_mda_filters; i++) {
+ if (priv->mda_filters[i].en)
+ continue;
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, 1, i);
+ return 0;
+ }
+
+ /* No room for new filter */
+ return -EINVAL;
+}
+
+static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
+{
+ unsigned int i;
+
+ /* Disable all filters and reset software view since the HW
+ * can lose context while in deep sleep suspend states
+ */
+ for (i = 0; i < priv->num_mda_filters; i++) {
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
+ priv->mda_filters[i].en = 0;
+ }
+
+ for (i = 0; i < priv->num_net_filters; i++)
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
+
+ /* Top level filter enable bit should be enabled at all times, set
+ * GEN_WAKE_CLEAR to clear the network filter wake-up which would
+ * otherwise be sticky
+ */
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_MDA_EN |
+ ASP_RX_FILTER_GEN_WK_CLR |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+/* ASP core initialization */
+static void bcmasp_core_init(struct bcmasp_priv *priv)
+{
+ rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
+ rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
+
+ rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
+
+	/* Disable and clear both UniMACs' wake-up interrupts to avoid
+	 * sticky interrupts.
+	 */
+ _intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
+ intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
+ ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);
+
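+	/* ASP_CTRL_SCRATCH_0 mirrors ASP_CTRL_CLOCK_CTRL so that the current
+	 * clock-gating state can be read back later, as done by
+	 * bcmasp_core_clock_set_intf().
+	 */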
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
+}
+
+static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ bcmasp_core_clock_set_ll(priv, clr, set);
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
+{
+ u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned long flags;
+ u32 reg;
+
+	/* When enabling an interface, also enable the shared RX and TX clocks
+	 * if they were not already enabled. Conversely, when disabling an
+	 * interface, if this is the last one enabled, turn off the shared RX
+	 * and TX clocks as well. We control per-interface clock-disable bits,
+	 * which is why we test for equality against the full RGMII clock bit
+	 * mask.
+	 */
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ if (en) {
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, intf_mask, 0);
+ } else {
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
+ if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
+ ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, 0, intf_mask);
+ }
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+static irqreturn_t bcmasp_isr_wol(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ u32 status;
+
+	/* No L3 wake-up IRQ, so there is nothing to ack */
+ if (priv->wol_irq <= 0)
+ goto irq_handled;
+
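+	/* Ack only the wake-up sources that are both pending and unmasked;
+	 * masked bits are left untouched.
+	 */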
+ status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
+ ~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
+ wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
+
+irq_handled:
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ return IRQ_HANDLED;
+}
+
+static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
+{
+ struct platform_device *pdev = priv->pdev;
+ int irq, ret;
+
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
+ pdev->name, priv);
+ if (ret)
+ return ret;
+
+ return irq;
+}
+
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = bcmasp_get_and_request_irq(priv, 1);
+ if (irq < 0) {
+ dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
+ return;
+ }
+
+ priv->wol_irq = irq;
+ priv->wol_irq_enabled_mask = 0;
+ device_set_wakeup_capable(&pdev->dev, 1);
+}
+
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *dev = &priv->pdev->dev;
+
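+	/* wol_irq_enabled_mask doubles as a per-port reference count: the
+	 * shared wake IRQ is armed when the first port enables WoL and
+	 * disarmed when the last port disables it.
+	 */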
+ if (en) {
+ if (priv->wol_irq_enabled_mask) {
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ return;
+ }
+
+ /* First enable */
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ enable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 1);
+ } else {
+ if (!priv->wol_irq_enabled_mask)
+ return;
+
+ clear_bit(intf->port, &priv->wol_irq_enabled_mask);
+ if (priv->wol_irq_enabled_mask)
+ return;
+
+ /* Last disable */
+ disable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 0);
+ }
+}
+
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
+{
+ if (priv->wol_irq > 0)
+ free_irq(priv->wol_irq, priv);
+}
+
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
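+/* Per-revision platform data: v2.1 and v2.2 share the 32-entry filter
+ * layout, but only v2.2 needs the EEE LPI fixup; v3.0 shrinks both filter
+ * pools, drops the TX channel offset and relocates the RX control block
+ * (rx_ctrl_offset = 0x10000).
+ */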
+static const struct bcmasp_plat_data v21_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_many,
+ .eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
+};
+
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
+}
+
+static const struct of_device_id bcmasp_of_match[] = {
+ { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_of_match);
+
+static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.1-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
+
+static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
+{
+ struct bcmasp_intf *intf, *n;
+
+ list_for_each_entry_safe(intf, n, &priv->intfs, list) {
+ list_del(&intf->list);
+ bcmasp_interface_destroy(intf);
+ }
+}
+
+static int bcmasp_probe(struct platform_device *pdev)
+{
+ const struct bcmasp_plat_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *ports_node;
+ struct bcmasp_priv *priv;
+ struct bcmasp_intf *intf;
+ int ret = 0, count = 0;
+ unsigned int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "failed to request clock\n");
+
+ /* Base from parent node */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ priv->pdev = pdev;
+ spin_lock_init(&priv->mda_lock);
+ spin_lock_init(&priv->clk_lock);
+ mutex_init(&priv->wol_lock);
+ mutex_init(&priv->net_lock);
+ INIT_LIST_HEAD(&priv->intfs);
+
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
+
+ bcmasp_set_pdata(priv, pdata);
+
+ /* Enable all clocks to ensure successful probing */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ /* Switch to the main clock */
+ priv->core_clock_select(priv, false);
+
+ bcmasp_intr2_mask_set_all(priv);
+ bcmasp_intr2_clear_all(priv);
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
+ pdev->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
+
+ /* Register mdio child nodes */
+ of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
+
+	/* ASP-specific initialization; needs to be done regardless of
+	 * how many interfaces come up.
+	 */
+ bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
+ bcmasp_core_init_filters(priv);
+
+ ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_warn(dev, "No ports found\n");
+ return -EINVAL;
+ }
+
+ i = 0;
+ for_each_available_child_of_node_scoped(ports_node, intf_node) {
+ intf = bcmasp_interface_create(priv, intf_node, i);
+ if (!intf) {
+ dev_err(dev, "Cannot create eth interface %d\n", i);
+ bcmasp_remove_intfs(priv);
+ ret = -ENOMEM;
+ goto of_put_exit;
+ }
+ list_add_tail(&intf->list, &priv->intfs);
+ i++;
+ }
+
+ /* Check and enable WoL */
+ bcmasp_init_wol(priv);
+
+ /* Drop the clock reference count now and let ndo_open()/ndo_close()
+ * manage it for us from now on.
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+	/* Now register the network ports, which will take care of managing
+	 * the clock properly.
+	 */
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = register_netdev(intf->ndev);
+ if (ret) {
+ netdev_err(intf->ndev,
+ "failed to register net_device: %d\n", ret);
+ bcmasp_wol_irq_destroy(priv);
+ bcmasp_remove_intfs(priv);
+ goto of_put_exit;
+ }
+ count++;
+ }
+
+ dev_info(dev, "Initialized %d port(s)\n", count);
+
+of_put_exit:
+ of_node_put(ports_node);
+ return ret;
+}
+
+static void bcmasp_remove(struct platform_device *pdev)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ return;
+
+ bcmasp_wol_irq_destroy(priv);
+ bcmasp_remove_intfs(priv);
+}
+
+static void bcmasp_shutdown(struct platform_device *pdev)
+{
+ bcmasp_remove(pdev);
+}
+
+static int __maybe_unused bcmasp_suspend(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_suspend(intf);
+ if (ret)
+ break;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Whether Wake-on-LAN is enabled or not, we can always disable
+ * the shared TX clock
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
+
+ priv->core_clock_select(priv, true);
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int __maybe_unused bcmasp_resume(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Switch to the main clock domain */
+ priv->core_clock_select(priv, false);
+
+ /* Re-enable all clocks for re-initialization */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ bcmasp_core_init(priv);
+ bcmasp_core_init_filters(priv);
+
+ /* And disable them to let the network devices take care of them */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_resume(intf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
+ bcmasp_suspend, bcmasp_resume);
+
+static struct platform_driver bcmasp_driver = {
+ .probe = bcmasp_probe,
+ .remove = bcmasp_remove,
+ .shutdown = bcmasp_shutdown,
+ .driver = {
+ .name = "brcm,asp-v2",
+ .of_match_table = bcmasp_of_match,
+ .pm = &bcmasp_pm_ops,
+ },
+};
+module_platform_driver(bcmasp_driver);
+
+MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
+MODULE_ALIAS("platform:brcm,asp-v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
new file mode 100644
index 000000000000..74adfdb50e11
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_H
+#define __BCMASP_H
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <uapi/linux/ethtool.h>
+
+#define ASP_INTR2_OFFSET 0x1000
+#define ASP_INTR2_STATUS 0x0
+#define ASP_INTR2_SET 0x4
+#define ASP_INTR2_CLEAR 0x8
+#define ASP_INTR2_MASK_STATUS 0xc
+#define ASP_INTR2_MASK_SET 0x10
+#define ASP_INTR2_MASK_CLEAR 0x14
+
+#define ASP_INTR2_RX_ECH(intr) BIT(intr)
+#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
+#define ASP_INTR2_UMC0_WAKE BIT(22)
+#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
+
+#define ASP_WAKEUP_INTR2_OFFSET 0x1200
+#define ASP_WAKEUP_INTR2_STATUS 0x0
+#define ASP_WAKEUP_INTR2_SET 0x4
+#define ASP_WAKEUP_INTR2_CLEAR 0x8
+#define ASP_WAKEUP_INTR2_MASK_STATUS 0xc
+#define ASP_WAKEUP_INTR2_MASK_SET 0x10
+#define ASP_WAKEUP_INTR2_MASK_CLEAR 0x14
+#define ASP_WAKEUP_INTR2_MPD_0 BIT(0)
+#define ASP_WAKEUP_INTR2_MPD_1 BIT(1)
+#define ASP_WAKEUP_INTR2_FILT_0 BIT(2)
+#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
+#define ASP_WAKEUP_INTR2_FW BIT(4)
+
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
+#define ASP_TX_ANALYTICS_OFFSET 0x4c000
+#define ASP_TX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_ANALYTICS_OFFSET 0x98000
+#define ASP_RX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_CTRL_OFFSET 0x9f000
+#define ASP_RX_CTRL_UMAC_0_FRAME_COUNT 0x8
+#define ASP_RX_CTRL_UMAC_1_FRAME_COUNT 0xc
+#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
+#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
+#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
+
+#define ASP_RX_FILTER_OFFSET 0x80000
+#define ASP_RX_FILTER_BLK_CTRL 0x0
+#define ASP_RX_FILTER_OPUT_EN BIT(0)
+#define ASP_RX_FILTER_MDA_EN BIT(1)
+#define ASP_RX_FILTER_LNR_MD BIT(2)
+#define ASP_RX_FILTER_GEN_WK_EN BIT(3)
+#define ASP_RX_FILTER_GEN_WK_CLR BIT(4)
+#define ASP_RX_FILTER_NT_FLT_EN BIT(5)
+#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100)
+#define ASP_RX_FILTER_MDA_CFG_EN_SHIFT 8
+#define ASP_RX_FILTER_MDA_CFG_UMC_SEL(sel) ((sel) > 1 ? BIT(17) : \
+ BIT((sel) + 9))
+#define ASP_RX_FILTER_MDA_PAT_H(sel) (((sel) * 0x14) + 0x104)
+#define ASP_RX_FILTER_MDA_PAT_L(sel) (((sel) * 0x14) + 0x108)
+#define ASP_RX_FILTER_MDA_MSK_H(sel) (((sel) * 0x14) + 0x10c)
+#define ASP_RX_FILTER_MDA_MSK_L(sel) (((sel) * 0x14) + 0x110)
+#define ASP_RX_FILTER_NET_CFG(sel) (((sel) * 0xa04) + 0x400)
+#define ASP_RX_FILTER_NET_CFG_CH(sel) ((sel) << 0)
+#define ASP_RX_FILTER_NET_CFG_EN BIT(9)
+#define ASP_RX_FILTER_NET_CFG_L2_EN BIT(10)
+#define ASP_RX_FILTER_NET_CFG_L3_EN BIT(11)
+#define ASP_RX_FILTER_NET_CFG_L4_EN BIT(12)
+#define ASP_RX_FILTER_NET_CFG_L3_FRM(sel) ((sel) << 13)
+#define ASP_RX_FILTER_NET_CFG_L4_FRM(sel) ((sel) << 15)
+#define ASP_RX_FILTER_NET_CFG_UMC(sel) BIT((sel) + 19)
+#define ASP_RX_FILTER_NET_CFG_DMA_EN BIT(27)
+
+#define ASP_RX_FILTER_NET_OFFSET_MAX 32
+#define ASP_RX_FILTER_NET_PAT(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x600)
+#define ASP_RX_FILTER_NET_MASK(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x700)
+
+#define ASP_RX_FILTER_NET_OFFSET(sel) (((sel) * 0xa04) + 0xe00)
+#define ASP_RX_FILTER_NET_OFFSET_L2(val) ((val) << 0)
+#define ASP_RX_FILTER_NET_OFFSET_L3_0(val) ((val) << 8)
+#define ASP_RX_FILTER_NET_OFFSET_L3_1(val) ((val) << 16)
+#define ASP_RX_FILTER_NET_OFFSET_L4(val) ((val) << 24)
+
+enum asp_rx_net_filter_block {
+ ASP_RX_FILTER_NET_L2 = 0,
+ ASP_RX_FILTER_NET_L3_0,
+ ASP_RX_FILTER_NET_L3_1,
+ ASP_RX_FILTER_NET_L4,
+ ASP_RX_FILTER_NET_BLOCK_MAX
+};
+
+#define ASP_EDPKT_OFFSET 0x9c000
+#define ASP_EDPKT_ENABLE 0x4
+#define ASP_EDPKT_ENABLE_EN BIT(0)
+#define ASP_EDPKT_HDR_CFG 0xc
+#define ASP_EDPKT_HDR_SZ_SHIFT 2
+#define ASP_EDPKT_HDR_SZ_32 0
+#define ASP_EDPKT_HDR_SZ_64 1
+#define ASP_EDPKT_HDR_SZ_96 2
+#define ASP_EDPKT_HDR_SZ_128 3
+#define ASP_EDPKT_BURST_BUF_PSCAL_TOUT 0x10
+#define ASP_EDPKT_BURST_BUF_WRITE_TOUT 0x14
+#define ASP_EDPKT_BURST_BUF_READ_TOUT 0x18
+#define ASP_EDPKT_RX_TS_COUNTER 0x38
+#define ASP_EDPKT_ENDI 0x48
+#define ASP_EDPKT_ENDI_DESC_SHIFT 8
+#define ASP_EDPKT_ENDI_NO_BT_SWP 0
+#define ASP_EDPKT_ENDI_BT_SWP_WD 1
+#define ASP_EDPKT_RX_PKT_CNT 0x138
+#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
+#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
+
+#define ASP_CTRL_OFFSET 0x101000
+#define ASP_CTRL_ASP_SW_INIT 0x04
+#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
+#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
+#define ASP_CTRL_ASP_SW_INIT_AS_RX BIT(2)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC0 BIT(3)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC1 BIT(4)
+#define ASP_CTRL_ASP_SW_INIT_ASP_XMEMIF BIT(5)
+#define ASP_CTRL_CLOCK_CTRL 0x04
+#define ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE BIT(0)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE BIT(1)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT 2
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK (0x7 << ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(x) BIT(ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT + (x))
+#define ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE GENMASK(4, 0)
+#define ASP_CTRL_CORE_CLOCK_SELECT 0x08
+#define ASP_CTRL_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL_SCRATCH_0 0x0c
+
+struct bcmasp_tx_cb {
+ struct sk_buff *skb;
+ unsigned int bytes_sent;
+ bool last;
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct bcmasp_res {
+ /* Per interface resources */
+ /* Port */
+ void __iomem *umac;
+ void __iomem *umac2fb;
+ void __iomem *rgmii;
+
+ /* TX slowpath/configuration */
+ void __iomem *tx_spb_ctrl;
+ void __iomem *tx_spb_top;
+ void __iomem *tx_epkt_core;
+ void __iomem *tx_pause_ctrl;
+};
+
+#define DESC_ADDR(x) ((x) & GENMASK_ULL(39, 0))
+#define DESC_FLAGS(x) ((x) & GENMASK_ULL(63, 40))
+
+struct bcmasp_desc {
+ u64 buf;
+ #define DESC_CHKSUM BIT_ULL(40)
+ #define DESC_CRC_ERR BIT_ULL(41)
+ #define DESC_RX_SYM_ERR BIT_ULL(42)
+ #define DESC_NO_OCT_ALN BIT_ULL(43)
+ #define DESC_PKT_TRUC BIT_ULL(44)
+ /* 39:0 (TX/RX) bits 0-39 of buf addr
+ * 40 (RX) checksum
+ * 41 (RX) crc_error
+ * 42 (RX) rx_symbol_error
+ * 43 (RX) non_octet_aligned
+ * 44 (RX) pkt_truncated
+ * 45 Reserved
+ * 56:46 (RX) mac_filter_id
+	 * 60:57 (RX) rx_port_num (0-unimac0, 1-unimac1)
+ * 61 Reserved
+ * 63:62 (TX) forward CRC, overwrite CRC
+ */
+ u32 size;
+ u32 flags;
+ #define DESC_INT_EN BIT(0)
+ #define DESC_SOF BIT(1)
+ #define DESC_EOF BIT(2)
+ #define DESC_EPKT_CMD BIT(3)
+ #define DESC_SCRAM_ST BIT(8)
+ #define DESC_SCRAM_END BIT(9)
+ #define DESC_PCPP BIT(10)
+ #define DESC_PPPP BIT(11)
+ /* 0 (TX) tx_int_en
+ * 1 (TX/RX) SOF
+ * 2 (TX/RX) EOF
+ * 3 (TX) epkt_command
+ * 6:4 (TX) PA
+ * 7 (TX) pause at desc end
+ * 8 (TX) scram_start
+ * 9 (TX) scram_end
+ * 10 (TX) PCPP
+ * 11 (TX) PPPP
+ * 14:12 Reserved
+ * 15 (TX) pid ch Valid
+ * 19:16 (TX) data_pkt_type
+ * 32:20 (TX) pid_channel (RX) nw_filter_id
+ */
+};
+
+struct bcmasp_intf;
+
+struct bcmasp_intf_stats64 {
+ /* Rx Stats */
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t rx_crc_errs;
+ u64_stats_t rx_sym_errs;
+
+	/* Tx Stats */
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+
+ struct u64_stats_sync syncp;
+};
+
+struct bcmasp_mib_counters {
+ u32 edpkt_ts;
+ u32 edpkt_rx_pkt_cnt;
+ u32 edpkt_hdr_ext_cnt;
+ u32 edpkt_hdr_out_cnt;
+ u32 umac_frm_cnt;
+ u32 fb_frm_cnt;
+ u32 fb_rx_fifo_depth;
+ u32 fb_out_frm_cnt;
+ u32 fb_filt_out_frm_cnt;
+ u32 alloc_rx_skb_failed;
+ u32 tx_dma_failed;
+ u32 mc_filters_full_cnt;
+ u32 uc_filters_full_cnt;
+ u32 filters_combine_cnt;
+ u32 promisc_filters_cnt;
+ u32 tx_realloc_offload_failed;
+ u32 tx_timeout_cnt;
+};
+
+struct bcmasp_intf_ops {
+ unsigned long (*rx_desc_read)(struct bcmasp_intf *intf);
+ void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ unsigned long (*tx_read)(struct bcmasp_intf *intf);
+ void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+};
+
+struct bcmasp_priv;
+
+struct bcmasp_intf {
+ struct list_head list;
+ struct net_device *ndev;
+ struct bcmasp_priv *parent;
+
+ /* ASP Ch */
+ int channel;
+ int port;
+ const struct bcmasp_intf_ops *ops;
+
+ /* Used for splitting shared resources */
+ int index;
+
+ struct napi_struct tx_napi;
+ /* TX ring, starts on a new cacheline boundary */
+ void __iomem *tx_spb_dma;
+ int tx_spb_index;
+ int tx_spb_clean_index;
+ struct bcmasp_desc *tx_spb_cpu;
+ dma_addr_t tx_spb_dma_addr;
+ dma_addr_t tx_spb_dma_valid;
+ dma_addr_t tx_spb_dma_read;
+ struct bcmasp_tx_cb *tx_cbs;
+
+ /* RX ring, starts on a new cacheline boundary */
+ void __iomem *rx_edpkt_cfg;
+ void __iomem *rx_edpkt_dma;
+ int rx_edpkt_index;
+ int rx_buf_order;
+ struct bcmasp_desc *rx_edpkt_cpu;
+ dma_addr_t rx_edpkt_dma_addr;
+ dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
+
+	/* RX buffer prefetcher ring */
+ void *rx_ring_cpu;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_ring_dma_valid;
+ struct napi_struct rx_napi;
+
+ struct bcmasp_res res;
+ unsigned int crc_fwd;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct device_node *ndev_dn;
+ phy_interface_t phy_interface;
+ bool internal_phy;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ u32 msg_enable;
+
+ /* Statistics */
+ struct bcmasp_intf_stats64 stats64;
+ struct bcmasp_mib_counters mib;
+
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+};
+
+#define NUM_NET_FILTERS 32
+struct bcmasp_net_filter {
+ struct ethtool_rx_flow_spec fs;
+
+ bool claimed;
+ bool wake_filter;
+
+ int port;
+ unsigned int hw_index;
+};
+
+#define NUM_MDA_FILTERS 32
+struct bcmasp_mda_filter {
+ /* Current owner of this filter */
+ int port;
+ bool en;
+ u8 addr[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+};
+
+struct bcmasp_plat_data {
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+	void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
+};
+
+struct bcmasp_priv {
+ struct platform_device *pdev;
+ struct clk *clk;
+
+ int irq;
+ u32 irq_mask;
+
+ /* Used if shared wol irq */
+ struct mutex wol_lock;
+ int wol_irq;
+ unsigned long wol_irq_enabled_mask;
+
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
+
+ void __iomem *base;
+
+ struct list_head intfs;
+
+ struct bcmasp_mda_filter *mda_filters;
+
+ /* MAC destination address filters lock */
+ spinlock_t mda_lock;
+
+ /* Protects accesses to ASP_CTRL_CLOCK_CTRL */
+ spinlock_t clk_lock;
+
+ struct bcmasp_net_filter *net_filters;
+
+ /* Network filter lock */
+ struct mutex net_lock;
+};
+
+static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->rx_desc_read(intf);
+}
+
+static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_buffer_write(intf, addr);
+}
+
+static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_desc_write(intf, addr);
+}
+
+static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->tx_read(intf);
+}
+
+static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->tx_write(intf, addr);
+}
+
+#define __BCMASP_IO_MACRO(name, m) \
+static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u32 reg = readl_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wl(struct bcmasp_intf *intf, u32 val, u32 off)\
+{ \
+ writel_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_IO_MACRO(name) __BCMASP_IO_MACRO(name, res.name)
+#define BCMASP_FP_IO_MACRO(name) __BCMASP_IO_MACRO(name, name)
+
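+/* For instance, BCMASP_IO_MACRO(umac) expands to umac_rl()/umac_wl()
+ * accessors over intf->res.umac, while the FP (fast path) variant operates
+ * on the ring pointers stored directly in struct bcmasp_intf.
+ */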
+BCMASP_IO_MACRO(umac);
+BCMASP_IO_MACRO(umac2fb);
+BCMASP_IO_MACRO(rgmii);
+BCMASP_FP_IO_MACRO(tx_spb_dma);
+BCMASP_IO_MACRO(tx_spb_ctrl);
+BCMASP_IO_MACRO(tx_spb_top);
+BCMASP_IO_MACRO(tx_epkt_core);
+BCMASP_IO_MACRO(tx_pause_ctrl);
+BCMASP_FP_IO_MACRO(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO(rx_edpkt_cfg);
+
+#define __BCMASP_FP_IO_MACRO_Q(name, m) \
+static inline u64 name##_rq(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u64 reg = readq_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wq(struct bcmasp_intf *intf, u64 val, u32 off)\
+{ \
+ writeq_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_FP_IO_MACRO_Q(name) __BCMASP_FP_IO_MACRO_Q(name, name)
+
+BCMASP_FP_IO_MACRO_Q(tx_spb_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
+
+#define PKT_OFFLOAD_NOP (0 << 28)
+#define PKT_OFFLOAD_HDR_OP (1 << 28)
+#define PKT_OFFLOAD_HDR_WRBACK BIT(19)
+#define PKT_OFFLOAD_HDR_COUNT(x) ((x) << 16)
+#define PKT_OFFLOAD_HDR_SIZE_1(x) ((x) << 4)
+#define PKT_OFFLOAD_HDR_SIZE_2(x) (x)
+#define PKT_OFFLOAD_HDR2_SIZE_2(x) ((x) << 24)
+#define PKT_OFFLOAD_HDR2_SIZE_3(x) ((x) << 12)
+#define PKT_OFFLOAD_HDR2_SIZE_4(x) (x)
+#define PKT_OFFLOAD_EPKT_OP (2 << 28)
+#define PKT_OFFLOAD_EPKT_WRBACK BIT(23)
+#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
+#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
+#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
+#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
+#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
+#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
+#define PKT_OFFLOAD_EPKT_BLOC(x) (x)
+#define PKT_OFFLOAD_END_OP (7 << 28)
+
+struct bcmasp_pkt_offload {
+ __be32 nop;
+ __be32 header;
+ __be32 header2;
+ __be32 epkt;
+ __be32 end;
+};
+
+#define BCMASP_CORE_IO_MACRO(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+}
+
+BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+
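+/* Like BCMASP_CORE_IO_MACRO(), but with an additional per-revision block
+ * offset taken from platform data; the RX control block moved between ASP
+ * revisions (rx_ctrl_offset is 0x0 on v2.x and 0x10000 on v3.0).
+ */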
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i);
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf);
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
+
+extern const struct ethtool_ops bcmasp_ethtool_ops;
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf);
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf);
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en);
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask);
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf);
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en);
+
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ u32 loc, bool wake_filter,
+ bool init);
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs);
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt);
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
+
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
+
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
+#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
new file mode 100644
index 000000000000..dd80ccfca19d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
+
+#include <linux/unaligned.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+enum bcmasp_stat_type {
+ BCMASP_STAT_RX_CTRL,
+ BCMASP_STAT_RX_CTRL_PER_INTF,
+ BCMASP_STAT_SOFT,
+};
+
+struct bcmasp_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ enum bcmasp_stat_type type;
+ u32 reg_offset;
+};
+
+#define STAT_BCMASP_SOFT_MIB(str) { \
+ .stat_string = str, \
+ .type = BCMASP_STAT_SOFT, \
+}
+
+#define STAT_BCMASP_OFFSET(str, _type, offset) { \
+ .stat_string = str, \
+ .type = _type, \
+ .reg_offset = offset, \
+}
+
+#define STAT_BCMASP_RX_CTRL(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
+#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL_PER_INTF, offset)
+
+/* Must match the order of struct bcmasp_mib_counters */
+static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
+ /* ASP RX control */
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
+ ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Port",
+ ASP_RX_CTRL_FB_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("RX Buffer FIFO Depth",
+ ASP_RX_CTRL_FB_RX_FIFO_DEPTH),
+ STAT_BCMASP_RX_CTRL("Frames Out(Buffer)",
+ ASP_RX_CTRL_FB_OUT_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL("Frames Out(Filters)",
+ ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT),
+ /* Software maintained statistics */
+ STAT_BCMASP_SOFT_MIB("RX SKB Alloc Failed"),
+ STAT_BCMASP_SOFT_MIB("TX DMA Failed"),
+ STAT_BCMASP_SOFT_MIB("Multicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("Unicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("MDA Filters Combined"),
+ STAT_BCMASP_SOFT_MIB("Promisc Filter Set"),
+ STAT_BCMASP_SOFT_MIB("TX Realloc For Offload Failed"),
+ STAT_BCMASP_SOFT_MIB("Tx Timeout Count"),
+};
+
+#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
+
+static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMASP_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ const char *str;
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ str = bcmasp_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
+ }
+ break;
+ default:
+ return;
+ }
+}
+
+static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
+{
+ unsigned int i;
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ const struct bcmasp_stats *s;
+ u32 offset, val;
+ char *p;
+
+ s = &bcmasp_gstrings_stats[i];
+ offset = s->reg_offset;
+ switch (s->type) {
+ case BCMASP_STAT_SOFT:
+ continue;
+ case BCMASP_STAT_RX_CTRL:
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ case BCMASP_STAT_RX_CTRL_PER_INTF:
+ offset += sizeof(u32) * intf->port;
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ default:
+ continue;
+ }
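+		/* struct bcmasp_mib_counters is a flat sequence of u32s in
+		 * the same order as bcmasp_gstrings_stats (see the comment
+		 * above the table), so stat i lives at byte offset
+		 * i * sizeof(u32).
+		 */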
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ put_unaligned(val, (u32 *)p);
+ }
+}
+
+static void bcmasp_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int i;
+ char *p;
+
+ if (netif_running(dev))
+ bcmasp_update_mib_counters(intf);
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcmasp_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "bcmasp", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static u32 bcmasp_get_msglevel(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ return intf->msg_enable;
+}
+
+static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ intf->msg_enable = level;
+}
+
+#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)
+static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
+
+ if (dev->phydev) {
+ phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
+
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
+ return;
+
+	/* Overlay MAC capabilities with those of the PHY queried above */
+ wol->supported |= BCMASP_SUPPORTED_WAKE;
+ wol->wolopts |= intf->wolopts;
+
+	/* Return the PHY-configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ memset(wol->sopass, 0, sizeof(wol->sopass));
+
+ /* Otherwise the MAC one */
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass));
+}
+
+static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+ int ret = 0;
+
+ /* Try Wake-on-LAN from the PHY first */
+ if (dev->phydev) {
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (ret != -EOPNOTSUPP && wol->wolopts)
+ return ret;
+ }
+
+ if (!device_can_wakeup(kdev))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & ~BCMASP_SUPPORTED_WAKE)
+ return -EINVAL;
+
+ /* Interface Specific */
+ intf->wolopts = wol->wolopts;
+ if (intf->wolopts & WAKE_MAGICSECURE)
+ memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
+
+ mutex_lock(&priv->wol_lock);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
+ mutex_unlock(&priv->wol_lock);
+
+ return 0;
+}
+
+static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+ u32 loc = cmd->fs.location;
+ bool wake = false;
+
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ wake = true;
+
+ /* Currently only supports WAKE filters */
+ if (!wake)
+ return -EOPNOTSUPP;
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if filter already exists */
+ if (bcmasp_netfilt_check_dup(intf, &cmd->fs))
+ return -EINVAL;
+
+ nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+	/* Return the location where we actually inserted the filter */
+ cmd->fs.location = nfilter->hw_index;
+ memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec));
+
+	/* Since we only support wake filters, defer register programming
+	 * until suspend time.
+	 */
+ return 0;
+}
+
+static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ bcmasp_netfilt_release(intf, nfilter);
+
+ return 0;
+}
+
+static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
+
+ cmd->data = intf->parent->num_net_filters;
+
+ return 0;
+}
+
+static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcmasp_flow_insert(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcmasp_flow_delete(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return ret;
+}
+
+static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmasp_netfilt_get_active(intf);
+ /* We support specifying rule locations */
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmasp_flow_get(intf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ cmd->data = intf->parent->num_net_filters;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return err;
+}
+
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
+{
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_get_eee(dev->phydev, e);
+}
+
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
+{
+ if (!dev->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_set_eee(dev->phydev, e);
+}
+
+static void bcmasp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ mac_stats->FramesTransmittedOK = umac_rl(intf, UMC_GTPOK);
+ mac_stats->SingleCollisionFrames = umac_rl(intf, UMC_GTSCL);
+ mac_stats->MultipleCollisionFrames = umac_rl(intf, UMC_GTMCL);
+ mac_stats->FramesReceivedOK = umac_rl(intf, UMC_GRPOK);
+ mac_stats->FrameCheckSequenceErrors = umac_rl(intf, UMC_GRFCS);
+ mac_stats->AlignmentErrors = umac_rl(intf, UMC_GRALN);
+ mac_stats->OctetsTransmittedOK = umac_rl(intf, UMC_GTBYT);
+ mac_stats->FramesWithDeferredXmissions = umac_rl(intf, UMC_GTDRF);
+ mac_stats->LateCollisions = umac_rl(intf, UMC_GTLCL);
+ mac_stats->FramesAbortedDueToXSColls = umac_rl(intf, UMC_GTXCL);
+ mac_stats->OctetsReceivedOK = umac_rl(intf, UMC_GRBYT);
+ mac_stats->MulticastFramesXmittedOK = umac_rl(intf, UMC_GTMCA);
+ mac_stats->BroadcastFramesXmittedOK = umac_rl(intf, UMC_GTBCA);
+ mac_stats->FramesWithExcessiveDeferral = umac_rl(intf, UMC_GTEDF);
+ mac_stats->MulticastFramesReceivedOK = umac_rl(intf, UMC_GRMCA);
+ mac_stats->BroadcastFramesReceivedOK = umac_rl(intf, UMC_GRBCA);
+}
+
+static const struct ethtool_rmon_hist_range bcmasp_rmon_ranges[] = {
+ { 0, 64},
+ { 65, 127},
+ { 128, 255},
+ { 256, 511},
+ { 512, 1023},
+ { 1024, 1518},
+ { 1519, 1522},
+ {}
+};
+
+static void bcmasp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ *ranges = bcmasp_rmon_ranges;
+
+ rmon_stats->undersize_pkts = umac_rl(intf, UMC_RRUND);
+ rmon_stats->oversize_pkts = umac_rl(intf, UMC_GROVR);
+ rmon_stats->fragments = umac_rl(intf, UMC_RRFRG);
+ rmon_stats->jabbers = umac_rl(intf, UMC_GRJBR);
+
+ rmon_stats->hist[0] = umac_rl(intf, UMC_GR64);
+ rmon_stats->hist[1] = umac_rl(intf, UMC_GR127);
+ rmon_stats->hist[2] = umac_rl(intf, UMC_GR255);
+ rmon_stats->hist[3] = umac_rl(intf, UMC_GR511);
+ rmon_stats->hist[4] = umac_rl(intf, UMC_GR1023);
+ rmon_stats->hist[5] = umac_rl(intf, UMC_GR1518);
+ rmon_stats->hist[6] = umac_rl(intf, UMC_GRMGV);
+
+ rmon_stats->hist_tx[0] = umac_rl(intf, UMC_TR64);
+ rmon_stats->hist_tx[1] = umac_rl(intf, UMC_TR127);
+ rmon_stats->hist_tx[2] = umac_rl(intf, UMC_TR255);
+ rmon_stats->hist_tx[3] = umac_rl(intf, UMC_TR511);
+ rmon_stats->hist_tx[4] = umac_rl(intf, UMC_TR1023);
+ rmon_stats->hist_tx[5] = umac_rl(intf, UMC_TR1518);
+ rmon_stats->hist_tx[6] = umac_rl(intf, UMC_TRMGV);
+}
+
+static void bcmasp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ ctrl_stats->MACControlFramesTransmitted = umac_rl(intf, UMC_GTXCF);
+ ctrl_stats->MACControlFramesReceived = umac_rl(intf, UMC_GRXCF);
+ ctrl_stats->UnsupportedOpcodesReceived = umac_rl(intf, UMC_GRXUO);
+}
+
+const struct ethtool_ops bcmasp_ethtool_ops = {
+ .get_drvinfo = bcmasp_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = bcmasp_get_msglevel,
+ .set_msglevel = bcmasp_set_msglevel,
+ .get_wol = bcmasp_get_wol,
+ .set_wol = bcmasp_set_wol,
+ .get_rxnfc = bcmasp_get_rxnfc,
+ .set_rxnfc = bcmasp_set_rxnfc,
+ .set_eee = bcmasp_set_eee,
+ .get_eee = bcmasp_get_eee,
+ .get_eth_mac_stats = bcmasp_get_eth_mac_stats,
+ .get_rmon_stats = bcmasp_get_rmon_stats,
+ .get_eth_ctrl_stats = bcmasp_get_eth_ctrl_stats,
+ .get_strings = bcmasp_get_strings,
+ .get_ethtool_stats = bcmasp_get_ethtool_stats,
+ .get_sset_count = bcmasp_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .nway_reset = phy_ethtool_nway_reset,
+};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
new file mode 100644
index 000000000000..b9973956c480
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_intf: " fmt
+
+#include <asm/byteorder.h>
+#include <linux/brcmphy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/ptp_classify.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static int incr_ring(int index, int ring_count)
+{
+ index++;
+ if (index == ring_count)
+ return 0;
+
+ return index;
+}
+
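+/* Judging by how bcmasp_xmit() and bcmasp_rx_poll() maintain them, the
+ * hardware "valid" pointers are inclusive (they hold the address of the
+ * last byte of the last usable descriptor), while the "read" pointers
+ * reference the first byte of the next descriptor, hence the two distinct
+ * wrap-around helpers below.
+ */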
+/* Points to last byte of descriptor */
+static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
+ addr += DESC_SIZE;
+ if (addr > end)
+ return beg + DESC_SIZE - 1;
+
+ return addr;
+}
+
+/* Points to first byte of descriptor */
+static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
+ addr += DESC_SIZE;
+ if (addr >= end)
+ return beg;
+
+ return addr;
+}
+
+static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
+{
+ if (en) {
+ tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
+ TX_EPKT_C_CFG_MISC_PT |
+ (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
+ TX_EPKT_C_CFG_MISC);
+ } else {
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+ }
+}
+
+static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
+{
+ if (en)
+ rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
+ RX_EDPKT_CFG_ENABLE);
+ else
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+}
+
+static void bcmasp_set_rx_mode(struct net_device *dev)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int ret;
+
+ spin_lock_bh(&intf->parent->mda_lock);
+
+ bcmasp_disable_all_filters(intf);
+
+ if (dev->flags & IFF_PROMISC)
+ goto set_promisc;
+
+ bcmasp_set_promisc(intf, 0);
+
+ bcmasp_set_broad(intf, 1);
+
+ bcmasp_set_oaddr(intf, dev->dev_addr, 1);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ bcmasp_set_allmulti(intf, 1);
+ } else {
+ bcmasp_set_allmulti(intf, 0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.mc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.uc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+ return;
+
+set_promisc:
+ bcmasp_set_promisc(intf, 1);
+ intf->mib.promisc_filters_cnt++;
+
+	/* Disable all filters used by this port */
+ bcmasp_disable_all_filters(intf);
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+}
+
+static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
+{
+ struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
+
+ txcb->skb = NULL;
+ dma_unmap_addr_set(txcb, dma_addr, 0);
+ dma_unmap_len_set(txcb, dma_len, 0);
+ txcb->last = false;
+}
+
+static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
+{
+ int next_index, i;
+
+ /* Check if we have enough room for cnt descriptors */
+ for (i = 0; i < cnt; i++) {
+ next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
+ if (next_index == intf->tx_spb_clean_index)
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
+ struct sk_buff *skb,
+ bool *csum_hw)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 header = 0, header2 = 0, epkt = 0;
+ struct bcmasp_pkt_offload *offload;
+ unsigned int header_cnt = 0;
+ u8 ip_proto;
+ int ret;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return skb;
+
+ ret = skb_cow_head(skb, sizeof(*offload));
+ if (ret < 0) {
+ intf->mib.tx_realloc_offload_failed++;
+ goto help;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
+ ip_proto = ip_hdr(skb)->protocol;
+ header_cnt += 2;
+ break;
+ case htons(ETH_P_IPV6):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ header_cnt += 2;
+ break;
+ default:
+ goto help;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
+ header_cnt++;
+ break;
+ case IPPROTO_UDP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
+ header_cnt++;
+ break;
+ default:
+ goto help;
+ }
+
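+	/* The five 32-bit words below form an in-band command header pushed
+	 * in front of the frame data itself; bcmasp_xmit() flags such frames
+	 * with DESC_EPKT_CMD, which presumably tells the hardware to consume
+	 * the header rather than transmit it.
+	 */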
+ offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
+
+ header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
+ PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_OP;
+
+ offload->nop = htonl(PKT_OFFLOAD_NOP);
+ offload->header = htonl(header);
+ offload->header2 = htonl(header2);
+ offload->epkt = htonl(epkt);
+ offload->end = htonl(PKT_OFFLOAD_END_OP);
+ *csum_hw = true;
+
+ return skb;
+
+help:
+ skb_checksum_help(skb);
+
+ return skb;
+}
+
+static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
+{
+ return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
+}
+
+static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
+}
+
+static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
+}
+
+static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
+{
+ return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
+}
+
+static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
+}
+
+static const struct bcmasp_intf_ops bcmasp_intf_ops = {
+ .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
+ .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
+ .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
+ .tx_read = bcmasp_tx_spb_dma_rq,
+ .tx_write = bcmasp_tx_spb_dma_wq,
+};
+
+static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int total_bytes, size;
+ int spb_index, nr_frags, i, j;
+ struct bcmasp_tx_cb *txcb;
+ dma_addr_t mapping, valid;
+ struct bcmasp_desc *desc;
+ bool csum_hw = false;
+ struct device *kdev;
+ skb_frag_t *frag;
+
+ kdev = &intf->parent->pdev->dev;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_spb_ring_full(intf, nr_frags + 1)) {
+ netif_stop_queue(dev);
+ if (net_ratelimit())
+ netdev_err(dev, "Tx Ring Full!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Save skb len before adding csum offload header */
+ total_bytes = skb->len;
+ skb = bcmasp_csum_offload(dev, skb, &csum_hw);
+ if (!skb)
+ return NETDEV_TX_OK;
+
+ spb_index = intf->tx_spb_index;
+ valid = intf->tx_spb_dma_valid;
+ for (i = 0; i <= nr_frags; i++) {
+ if (!i) {
+ size = skb_headlen(skb);
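+			/* Pad linear frames up to the minimum Ethernet frame
+			 * size (ETH_ZLEN plus room for the FCS) so that runt
+			 * frames are never handed to the MAC.
+			 */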
+ if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
+ return NETDEV_TX_OK;
+ size = skb->len;
+ }
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_mapping_error(kdev, mapping)) {
+ intf->mib.tx_dma_failed++;
+ spb_index = intf->tx_spb_index;
+ for (j = 0; j < i; j++) {
+ bcmasp_clean_txcb(intf, spb_index);
+ spb_index = incr_ring(spb_index,
+ DESC_RING_COUNT);
+ }
+ /* Rewind so we do not have a hole */
+ spb_index = intf->tx_spb_index;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txcb = &intf->tx_cbs[spb_index];
+ desc = &intf->tx_spb_cpu[spb_index];
+ memset(desc, 0, sizeof(*desc));
+ txcb->skb = skb;
+ txcb->bytes_sent = total_bytes;
+ dma_unmap_addr_set(txcb, dma_addr, mapping);
+ dma_unmap_len_set(txcb, dma_len, size);
+ if (!i) {
+ desc->flags |= DESC_SOF;
+ if (csum_hw)
+ desc->flags |= DESC_EPKT_CMD;
+ }
+
+ if (i == nr_frags) {
+ desc->flags |= DESC_EOF;
+ txcb->last = true;
+ }
+
+ desc->buf = mapping;
+ desc->size = size;
+ desc->flags |= DESC_INT_EN;
+
+ netif_dbg(intf, tx_queued, dev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ spb_index);
+
+ spb_index = incr_ring(spb_index, DESC_RING_COUNT);
+ valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ /* Ensure all descriptors have been written to DRAM for the
+ * hardware to see up-to-date contents.
+ */
+ wmb();
+
+ intf->tx_spb_index = spb_index;
+ intf->tx_spb_dma_valid = valid;
+
+ skb_tx_timestamp(skb);
+
+ bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
+
+ if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void bcmasp_netif_start(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ bcmasp_set_rx_mode(dev);
+ napi_enable(&intf->tx_napi);
+ napi_enable(&intf->rx_napi);
+
+ bcmasp_enable_rx_irq(intf, 1);
+ bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
+
+ phy_start(dev->phydev);
+}
+
+static void umac_reset(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x0, UMC_CMD);
+ umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
+ usleep_range(10, 100);
+	/* We hold the UniMAC in reset and bring it out of
+	 * reset once the PHY link is up.
+	 */
+}
+
+static void umac_set_hw_addr(struct bcmasp_intf *intf,
+ const unsigned char *addr)
+{
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ umac_wl(intf, mac0, UMC_MAC0);
+ umac_wl(intf, mac1, UMC_MAC1);
+}
+
+static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_wl(intf, reg, UMC_CMD);
+
+	/* The UniMAC stops on a packet boundary; wait long enough for a
+	 * full-sized packet to be processed (1 msec).
+	 */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static void umac_init(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x800, UMC_FRM_LEN);
+ umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
+ umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
+}
+
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
+{
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long read, released = 0;
+ struct bcmasp_tx_cb *txcb;
+ struct bcmasp_desc *desc;
+ dma_addr_t mapping;
+
+ read = bcmasp_intf_tx_read(intf);
+ while (intf->tx_spb_dma_read != read) {
+ txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
+ mapping = dma_unmap_addr(txcb, dma_addr);
+
+ dma_unmap_single(kdev, mapping,
+ dma_unmap_len(txcb, dma_len),
+ DMA_TO_DEVICE);
+
+ if (txcb->last) {
+ dev_consume_skb_any(txcb->skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
+
+ netif_dbg(intf, tx_done, intf->ndev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ intf->tx_spb_clean_index);
+
+ bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
+ released++;
+
+ intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
+ DESC_RING_COUNT);
+ intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
+ intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
+
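+ /* TX reclaim is bounded by the ring size, so complete NAPI
+ * unconditionally; by convention TX completion work is not
+ * accounted against @budget.
+ */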
+ napi_complete(&intf->tx_napi);
+
+ bcmasp_enable_tx_irq(intf, 1);
+
+ if (released)
+ netif_wake_queue(intf->ndev);
+
+ return 0;
+}
+
+static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, rx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long processed = 0;
+ struct bcmasp_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t valid;
+ void *data;
+ u64 flags;
+ u32 len;
+
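+ /* The RX descriptor write pointer appears to report the last
+ * byte written by hardware; +1 yields the first address not yet
+ * consumed, wrapping at the end of the descriptor ring.
+ */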
+ valid = bcmasp_intf_rx_desc_read(intf) + 1;
+ if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
+ valid = intf->rx_edpkt_dma_addr;
+
+ while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
+ desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
+
+ /* Ensure the descriptor has been fully written to DRAM by the
+ * hardware before the CPU reads it.
+ */
+ rmb();
+
+ /* Calculate the virtual address by offsetting from the DMA address */
+ data = intf->rx_ring_cpu +
+ (DESC_ADDR(desc->buf) - intf->rx_ring_dma);
+
+ flags = DESC_FLAGS(desc->buf);
+ if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
+ if (net_ratelimit()) {
+ netif_err(intf, rx_status, intf->ndev,
+ "flags=0x%llx\n", flags);
+ }
+
+ u64_stats_update_begin(&stats->syncp);
+ if (flags & DESC_CRC_ERR)
+ u64_stats_inc(&stats->rx_crc_errs);
+ if (flags & DESC_RX_SYM_ERR)
+ u64_stats_inc(&stats->rx_sym_errs);
+ u64_stats_update_end(&stats->syncp);
+
+ goto next;
+ }
+
+ dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
+ DMA_FROM_DEVICE);
+
+ len = desc->size;
+
+ skb = napi_alloc_skb(napi, len);
+ if (!skb) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_dropped);
+ u64_stats_update_end(&stats->syncp);
+ intf->mib.alloc_rx_skb_failed++;
+
+ goto next;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+
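+ /* Strip what is presumably the two stuffing bytes prepended by
+ * the EDPKT engine (RX_EDPKT_CFG_CFG0_EFRM_STUF is set in
+ * bcmasp_init_rx()).
+ */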
+ skb_pull(skb, 2);
+ len -= 2;
+ if (likely(intf->crc_fwd)) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ if ((intf->ndev->features & NETIF_F_RXCSUM) &&
+ (desc->buf & DESC_CHKSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb->protocol = eth_type_trans(skb, intf->ndev);
+
+ napi_gro_receive(napi, skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
+ u64_stats_update_end(&stats->syncp);
+
+next:
+ bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
+ desc->size));
+
+ processed++;
+ intf->rx_edpkt_dma_read =
+ incr_first_byte(intf->rx_edpkt_dma_read,
+ intf->rx_edpkt_dma_addr,
+ DESC_RING_COUNT);
+ intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
+ DESC_RING_COUNT);
+ }
+
+ bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
+
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
+ bcmasp_enable_rx_irq(intf, 1);
+
+ return processed;
+}
+
+static void bcmasp_adj_link(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 cmd_bits = 0, reg;
+ int changed = 0;
+
+ if (intf->old_link != phydev->link) {
+ changed = 1;
+ intf->old_link = phydev->link;
+ }
+
+ if (intf->old_duplex != phydev->duplex) {
+ changed = 1;
+ intf->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = UMC_CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = UMC_CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = UMC_CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = UMC_CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= UMC_CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= UMC_CMD_HD_EN;
+
+ if (intf->old_pause != phydev->pause) {
+ changed = 1;
+ intf->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
+
+ if (!changed)
+ return;
+
+ if (phydev->link) {
+ reg = umac_rl(intf, UMC_CMD);
+ reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
+ UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
+ UMC_CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
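+ /* Coming out of umac_reset(): release SW_RESET, give the block
+ * a moment to settle, then enable TX/RX. PROMISC is presumably
+ * harmless here since frame filtering is done by the ASP
+ * filters rather than the UniMAC (assumption).
+ */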
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
+ umac_wl(intf, reg, UMC_CMD);
+
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
+ }
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ if (phydev->link)
+ reg |= RGMII_LINK;
+ else
+ reg &= ~RGMII_LINK;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+
+ phy_print_status(phydev);
+}
+
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct page *buffer_pg;
+
+ /* Alloc RX */
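+ /* RING_BUFFER_SIZE is NUM_4K_BUFFERS (32) pages: 128 KiB with
+ * 4 KiB pages, i.e. an order-5 physically contiguous allocation.
+ */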
+ intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
+ buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
+
+ intf->rx_ring_cpu = page_to_virt(buffer_pg);
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
+
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
+ GFP_KERNEL);
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
+
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
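+ /* The END/VALID registers take the address of the last valid
+ * byte, hence the -1 on both ring limits.
+ */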
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
+ intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
+ intf->rx_edpkt_index = 0;
+
+ /* Make sure channels are disabled */
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+
+ /* Rx SPB */
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_END);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_VALID);
+
+ /* EDPKT */
+ rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
+ RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_64_ALN <<
+ RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_EFRM_STUF),
+ RX_EDPKT_CFG_CFG0);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
+
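+ /* The UMAC2FB block addresses channels with an offset of 11 and
+ * an "ok to send" threshold of 0xd; both appear to be fixed
+ * properties of the fabric (assumption).
+ */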
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
+}
+
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
+ intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
+ intf->tx_spb_index = 0;
+ intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
+
+ /* Make sure channels are disabled */
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+
+ /* Tx SPB */
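+ /* TX buffer IDs are offset by 8 from the channel number,
+ * mirroring the +11 RX channel offset above (assumption: fixed
+ * by the fabric).
+ */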
+ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
+ TX_SPB_CTRL_XF_CTRL2);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
+ tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
+
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
+}
+
+static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
+ RGMII_EPHY_CFG_IDDQ_GLOBAL;
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
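+ /* Power-up: ungate what is presumably the 25 MHz reference
+ * clock, release the IDDQ/power-down bits with reset asserted,
+ * then deassert reset. Each step is given ~1 ms to settle
+ * (inferred from the mdelay(1) calls).
+ */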
+ if (enable) {
+ reg &= ~RGMII_EPHY_CK25_DIS;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~mask;
+ reg |= RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~RGMII_EPHY_RESET;
+ } else {
+ reg |= mask | RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+ reg |= RGMII_EPHY_CK25_DIS;
+ }
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ /* Set or clear the LED control override so the LEDs do not light
+ * up, and draw unnecessary current, while the EPHY is powered off.
+ */
+ reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
+ if (enable)
+ reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ else
+ reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
+}
+
+static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_OOB_DIS;
+ if (enable)
+ reg |= RGMII_MODE_EN;
+ else
+ reg &= ~RGMII_MODE_EN;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static void bcmasp_netif_deinit(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 reg, timeout = 1000;
+
+ napi_disable(&intf->tx_napi);
+
+ bcmasp_enable_tx(intf, 0);
+
+ /* Flush any TX packets in the pipe */
+ tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
+ do {
+ reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
+ if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
+ break;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+ tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+
+ bcmasp_tx_reclaim(intf);
+
+ umac_enable_set(intf, UMC_CMD_TX_EN, 0);
+
+ phy_stop(dev->phydev);
+
+ umac_enable_set(intf, UMC_CMD_RX_EN, 0);
+
+ bcmasp_flush_rx_port(intf);
+ usleep_range(1000, 2000);
+ bcmasp_enable_rx(intf, 0);
+
+ napi_disable(&intf->rx_napi);
+
+ /* Disable interrupts */
+ bcmasp_enable_tx_irq(intf, 0);
+ bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
+
+ netif_napi_del(&intf->tx_napi);
+ netif_napi_del(&intf->rx_napi);
+}
+
+static int bcmasp_stop(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
+
+ /* Stop tx from updating HW */
+ netif_tx_disable(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ bcmasp_reclaim_free_buffers(intf);
+
+ phy_disconnect(dev->phydev);
+
+ /* Disable internal EPHY or external PHY */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* Disable the interface clocks */
+ bcmasp_core_clock_set_intf(intf, false);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return 0;
+}
+
+static void bcmasp_configure_port(struct bcmasp_intf *intf)
+{
+ u32 reg, id_mode_dis = 0;
+
+ reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
+ reg &= ~RGMII_PORT_MODE_MASK;
+
+ switch (intf->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = RGMII_ID_MODE_DIS;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= RGMII_PORT_MODE_EXT_GPHY;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ reg |= RGMII_PORT_MODE_EXT_EPHY;
+ break;
+ default:
+ break;
+ }
+
+ if (intf->internal_phy)
+ reg |= RGMII_PORT_MODE_EPHY;
+
+ rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_ID_MODE_DIS;
+ reg |= id_mode_dis;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ phy_interface_t phy_iface = intf->phy_interface;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
+ struct phy_device *phydev = NULL;
+ int ret;
+
+ /* Always enable interface clocks */
+ bcmasp_core_clock_set_intf(intf, true);
+
+ /* Enable internal PHY or external PHY before any MAC activity */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, true);
+ else
+ bcmasp_rgmii_mode_en_set(intf, true);
+ bcmasp_configure_port(intf);
+
+ /* This is an ugly quirk: we have not been correctly
+ * interpreting the phy_interface values, and we have done so
+ * across different drivers, so at least we are consistent in
+ * our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has
+ * been strapped or programmed correctly by the boot loader so
+ * we should stick to our incorrect interpretation since we
+ * have validated it.
+ *
+ * When a dedicated PHY driver is in use, however, we need to
+ * reverse the meaning of the phy_interface mode values into
+ * something that the PHY driver will interpret and act on, so
+ * that our two mistakes cancel each other out, so to speak. We
+ * only do this for the two modes that the GENET driver
+ * officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
+ * Other modes are not *officially* supported by the boot
+ * loader or by the scripted environment that generates Device
+ * Tree blobs for those platforms.
+ *
+ * Note that internal PHY and fixed-link configurations are not
+ * affected because they use different phy_interface_t values
+ * or the Generic PHY driver.
+ */
+ switch (phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
+
+ if (phy_connect) {
+ phydev = of_phy_connect(dev, intf->phy_dn,
+ bcmasp_adj_link, phy_flags,
+ phy_iface);
+ if (!phydev) {
+ ret = -ENODEV;
+ netdev_err(dev, "could not attach to PHY\n");
+ goto err_phy_disable;
+ }
+
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
+ }
+
+ umac_reset(intf);
+
+ umac_init(intf);
+
+ umac_set_hw_addr(intf, dev->dev_addr);
+
+ intf->old_duplex = -1;
+ intf->old_link = -1;
+ intf->old_pause = -1;
+
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
+ bcmasp_enable_tx(intf, 1);
+
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ bcmasp_enable_rx(intf, 1);
+
+ intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
+
+ bcmasp_netif_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_phy_disable:
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+ return ret;
+}
+
+static int bcmasp_open(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret;
+
+ netif_dbg(intf, ifup, dev, "bcmasp open\n");
+
+ ret = bcmasp_alloc_buffers(intf);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
+ clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
+
+ return ret;
+}
+
+static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
+ intf->mib.tx_timeout_cnt++;
+}
+
+static int bcmasp_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ if (snprintf(name, len, "p%d", intf->port) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void bcmasp_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_intf_stats64 *lstats;
+ unsigned int start;
+
+ lstats = &intf->stats64;
+
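+ /* Standard u64_stats seqcount loop: re-read the snapshot if a
+ * writer updated the counters concurrently.
+ */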
+ do {
+ start = u64_stats_fetch_begin(&lstats->syncp);
+ stats->rx_packets = u64_stats_read(&lstats->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
+ stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
+ stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
+ stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+
+ stats->tx_packets = u64_stats_read(&lstats->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
+ } while (u64_stats_fetch_retry(&lstats->syncp, start));
+}
+
+static const struct net_device_ops bcmasp_netdev_ops = {
+ .ndo_open = bcmasp_open,
+ .ndo_stop = bcmasp_stop,
+ .ndo_start_xmit = bcmasp_xmit,
+ .ndo_tx_timeout = bcmasp_tx_timeout,
+ .ndo_set_rx_mode = bcmasp_set_rx_mode,
+ .ndo_get_phys_port_name = bcmasp_get_phys_port_name,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = bcmasp_get_stats64,
+};
+
+static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
+{
+ /* Per port */
+ intf->res.umac = priv->base + UMC_OFFSET(intf);
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
+ (intf->port * 0x4));
+ intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
+
+ /* Per channel */
+ intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
+ intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
+ intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
+ intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
+ intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
+
+ intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
+ intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
+}
+
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct bcmasp_intf *intf;
+ struct net_device *ndev;
+ u32 ch, port;
+ int ret;
+
+ if (of_property_read_u32(ndev_dn, "reg", &port)) {
+ dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
+ goto err;
+ }
+
+ if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
+ dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
+ goto err;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
+ if (!ndev) {
+ dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
+ goto err;
+ }
+ intf = netdev_priv(ndev);
+
+ intf->parent = priv;
+ intf->ndev = ndev;
+ intf->channel = ch;
+ intf->port = port;
+ intf->ndev_dn = ndev_dn;
+ intf->index = i;
+
+ ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
+ if (ret < 0) {
+ dev_err(dev, "invalid PHY mode property\n");
+ goto err_free_netdev;
+ }
+
+ if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ intf->internal_phy = true;
+
+ intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
+ if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
+ ret = of_phy_register_fixed_link(ndev_dn);
+ if (ret) {
+ dev_warn(dev, "%s: failed to register fixed PHY\n",
+ ndev_dn->name);
+ goto err_free_netdev;
+ }
+ intf->phy_dn = ndev_dn;
+ }
+
+ /* Map resource */
+ bcmasp_map_res(priv, intf);
+
+ if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
+ intf->phy_interface != PHY_INTERFACE_MODE_MII &&
+ intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
+ (intf->port != 1 && intf->internal_phy)) {
+ netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
+ phy_modes(intf->phy_interface), intf->port);
+ ret = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ ret = of_get_ethdev_address(ndev_dn, ndev);
+ if (ret) {
+ netdev_warn(ndev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ SET_NETDEV_DEV(ndev, dev);
+ intf->ops = &bcmasp_intf_ops;
+ ndev->netdev_ops = &bcmasp_netdev_ops;
+ ndev->ethtool_ops = &bcmasp_ethtool_ops;
+ intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ ndev->hw_features |= ndev->features;
+ ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+
+ netdev_sw_irq_coalesce_default_on(ndev);
+
+ return intf;
+
+err_free_netdev:
+ free_netdev(ndev);
+err:
+ return NULL;
+}
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf)
+{
+ if (intf->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(intf->ndev);
+ if (of_phy_is_fixed_link(intf->ndev_dn))
+ of_phy_deregister_fixed_link(intf->ndev_dn);
+ free_netdev(intf->ndev);
+}
+
+static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
+{
+ struct net_device *ndev = intf->ndev;
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= UMC_MPD_CTRL_MPD_EN;
+ reg &= ~UMC_MPD_CTRL_PSW_EN;
+ if (intf->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
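+ /* sopass[] holds 6 bytes: the two most significant go to
+ * UMC_PSW_MS, the remaining four to UMC_PSW_LS.
+ */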
+ umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
+ UMC_PSW_MS);
+ umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
+ UMC_PSW_LS);
+ reg |= UMC_MPD_CTRL_PSW_EN;
+ }
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->wolopts & WAKE_FILTER)
+ bcmasp_netfilt_suspend(intf);
+
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
+ umac_enable_set(intf, UMC_CMD_RX_EN, 1);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_CLEAR);
+ }
+
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
+ netif_dbg(intf, wol, ndev, "entered WOL mode\n");
+}
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct net_device *dev = intf->ndev;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ if (!intf->wolopts) {
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* If Wake-on-LAN is disabled, we can safely
+ * disable the network interface clocks.
+ */
+ bcmasp_core_clock_set_intf(intf, false);
+ }
+
+ if (device_may_wakeup(kdev) && intf->wolopts)
+ bcmasp_suspend_to_wol(intf);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return 0;
+}
+
+static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
+{
+ u32 reg;
+
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ reg &= ~UMC_MPD_CTRL_MPD_EN;
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_SET);
+ }
+}
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf)
+{
+ struct net_device *dev = intf->ndev;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ return ret;
+
+ ret = bcmasp_netif_init(dev, false);
+ if (ret)
+ goto out;
+
+ bcmasp_resume_from_wol(intf);
+
+ netif_device_attach(dev);
+
+ return 0;
+
+out:
+ clk_disable_unprepare(intf->parent->clk);
+ return ret;
+}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
new file mode 100644
index 000000000000..af7418348e81
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_INTF_DEFS_H
+#define __BCMASP_INTF_DEFS_H
+
+#define UMC_OFFSET(intf) \
+ ((((intf)->port) * 0x800) + 0xc000)
+#define UMC_CMD 0x008
+#define UMC_CMD_TX_EN BIT(0)
+#define UMC_CMD_RX_EN BIT(1)
+#define UMC_CMD_SPEED_SHIFT 0x2
+#define UMC_CMD_SPEED_MASK 0x3
+#define UMC_CMD_SPEED_10 0x0
+#define UMC_CMD_SPEED_100 0x1
+#define UMC_CMD_SPEED_1000 0x2
+#define UMC_CMD_SPEED_2500 0x3
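+/* The speed field is 2 bits wide at UMC_CMD bits 3:2: set with
+ * (speed << UMC_CMD_SPEED_SHIFT), clear with
+ * (UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT).
+ */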
+#define UMC_CMD_PROMISC BIT(4)
+#define UMC_CMD_PAD_EN BIT(5)
+#define UMC_CMD_CRC_FWD BIT(6)
+#define UMC_CMD_PAUSE_FWD BIT(7)
+#define UMC_CMD_RX_PAUSE_IGNORE BIT(8)
+#define UMC_CMD_TX_ADDR_INS BIT(9)
+#define UMC_CMD_HD_EN BIT(10)
+#define UMC_CMD_SW_RESET BIT(13)
+#define UMC_CMD_LCL_LOOP_EN BIT(15)
+#define UMC_CMD_AUTO_CONFIG BIT(22)
+#define UMC_CMD_CNTL_FRM_EN BIT(23)
+#define UMC_CMD_NO_LEN_CHK BIT(24)
+#define UMC_CMD_RMT_LOOP_EN BIT(25)
+#define UMC_CMD_PRBL_EN BIT(27)
+#define UMC_CMD_TX_PAUSE_IGNORE BIT(28)
+#define UMC_CMD_TX_RX_EN BIT(29)
+#define UMC_CMD_RUNT_FILTER_DIS BIT(30)
+#define UMC_MAC0 0x0c
+#define UMC_MAC1 0x10
+#define UMC_FRM_LEN 0x14
+#define UMC_EEE_CTRL 0x64
+#define EN_LPI_RX_PAUSE BIT(0)
+#define EN_LPI_TX_PFC BIT(1)
+#define EN_LPI_TX_PAUSE BIT(2)
+#define EEE_EN BIT(3)
+#define RX_FIFO_CHECK BIT(4)
+#define EEE_TX_CLK_DIS BIT(5)
+#define DIS_EEE_10M BIT(6)
+#define LP_IDLE_PREDICTION_MODE BIT(7)
+#define UMC_EEE_LPI_TIMER 0x68
+#define UMC_PAUSE_CNTRL 0x330
+#define UMC_TX_FLUSH 0x334
+#define UMC_GR64 0x400
+#define UMC_GR127 0x404
+#define UMC_GR255 0x408
+#define UMC_GR511 0x40c
+#define UMC_GR1023 0x410
+#define UMC_GR1518 0x414
+#define UMC_GRMGV 0x418
+#define UMC_GR2047 0x41c
+#define UMC_GR4095 0x420
+#define UMC_GR9216 0x424
+#define UMC_GRPKT 0x428
+#define UMC_GRBYT 0x42c
+#define UMC_GRMCA 0x430
+#define UMC_GRBCA 0x434
+#define UMC_GRFCS 0x438
+#define UMC_GRXCF 0x43c
+#define UMC_GRXPF 0x440
+#define UMC_GRXUO 0x444
+#define UMC_GRALN 0x448
+#define UMC_GRFLR 0x44c
+#define UMC_GRCDE 0x450
+#define UMC_GRFCR 0x454
+#define UMC_GROVR 0x458
+#define UMC_GRJBR 0x45c
+#define UMC_GRMTUE 0x460
+#define UMC_GRPOK 0x464
+#define UMC_GRUC 0x468
+#define UMC_GRPPP 0x46c
+#define UMC_GRMCRC 0x470
+#define UMC_TR64 0x480
+#define UMC_TR127 0x484
+#define UMC_TR255 0x488
+#define UMC_TR511 0x48c
+#define UMC_TR1023 0x490
+#define UMC_TR1518 0x494
+#define UMC_TRMGV 0x498
+#define UMC_TR2047 0x49c
+#define UMC_TR4095 0x4a0
+#define UMC_TR9216 0x4a4
+#define UMC_GTPKT 0x4a8
+#define UMC_GTMCA 0x4ac
+#define UMC_GTBCA 0x4b0
+#define UMC_GTXPF 0x4b4
+#define UMC_GTXCF 0x4b8
+#define UMC_GTFCS 0x4bc
+#define UMC_GTOVR 0x4c0
+#define UMC_GTDRF 0x4c4
+#define UMC_GTEDF 0x4c8
+#define UMC_GTSCL 0x4cc
+#define UMC_GTMCL 0x4d0
+#define UMC_GTLCL 0x4d4
+#define UMC_GTXCL 0x4d8
+#define UMC_GTFRG 0x4dc
+#define UMC_GTNCL 0x4e0
+#define UMC_GTJBR 0x4e4
+#define UMC_GTBYT 0x4e8
+#define UMC_GTPOK 0x4ec
+#define UMC_GTUC 0x4f0
+#define UMC_RRPKT 0x500
+#define UMC_RRUND 0x504
+#define UMC_RRFRG 0x508
+#define UMC_RRBYT 0x50c
+#define UMC_MIB_CNTRL 0x580
+#define UMC_MIB_CNTRL_RX_CNT_RST BIT(0)
+#define UMC_MIB_CNTRL_RUNT_CNT_RST BIT(1)
+#define UMC_MIB_CNTRL_TX_CNT_RST BIT(2)
+#define UMC_RX_MAX_PKT_SZ 0x608
+#define UMC_MPD_CTRL 0x620
+#define UMC_MPD_CTRL_MPD_EN BIT(0)
+#define UMC_MPD_CTRL_PSW_EN BIT(27)
+#define UMC_PSW_MS 0x624
+#define UMC_PSW_LS 0x628
+
+#define UMAC2FB_OFFSET 0x9f044
+#define UMAC2FB_CFG 0x0
+#define UMAC2FB_CFG_OPUT_EN BIT(0)
+#define UMAC2FB_CFG_VLAN_EN BIT(1)
+#define UMAC2FB_CFG_SNAP_EN BIT(2)
+#define UMAC2FB_CFG_BCM_TG_EN BIT(3)
+#define UMAC2FB_CFG_IPUT_EN BIT(4)
+#define UMAC2FB_CFG_CHID_SHIFT 8
+#define UMAC2FB_CFG_OK_SEND_SHIFT 24
+#define UMAC2FB_CFG_DEFAULT_EN \
+ (UMAC2FB_CFG_OPUT_EN | UMAC2FB_CFG_VLAN_EN \
+ | UMAC2FB_CFG_SNAP_EN | UMAC2FB_CFG_IPUT_EN)
+
+#define RGMII_OFFSET(intf) \
+ ((((intf)->port) * 0x100) + 0xd000)
+#define RGMII_EPHY_CNTRL 0x00
+#define RGMII_EPHY_CFG_IDDQ_BIAS BIT(0)
+#define RGMII_EPHY_CFG_EXT_PWRDOWN BIT(1)
+#define RGMII_EPHY_CFG_FORCE_DLL_EN BIT(2)
+#define RGMII_EPHY_CFG_IDDQ_GLOBAL BIT(3)
+#define RGMII_EPHY_CK25_DIS BIT(4)
+#define RGMII_EPHY_RESET BIT(7)
+#define RGMII_OOB_CNTRL 0x0c
+#define RGMII_LINK BIT(4)
+#define RGMII_OOB_DIS BIT(5)
+#define RGMII_MODE_EN BIT(6)
+#define RGMII_ID_MODE_DIS BIT(16)
+
+#define RGMII_PORT_CNTRL 0x60
+#define RGMII_PORT_MODE_EPHY 0
+#define RGMII_PORT_MODE_GPHY 1
+#define RGMII_PORT_MODE_EXT_EPHY 2
+#define RGMII_PORT_MODE_EXT_GPHY 3
+#define RGMII_PORT_MODE_EXT_RVMII 4
+#define RGMII_PORT_MODE_MASK GENMASK(2, 0)
+
+#define RGMII_SYS_LED_CNTRL 0x74
+#define RGMII_SYS_LED_CNTRL_LINK_OVRD BIT(15)
+
+#define TX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0x48180)
+#define TX_SPB_DMA_READ 0x00
+#define TX_SPB_DMA_BASE 0x08
+#define TX_SPB_DMA_END 0x10
+#define TX_SPB_DMA_VALID 0x18
+#define TX_SPB_DMA_FIFO_CTRL 0x20
+#define TX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define TX_SPB_DMA_FIFO_STATUS 0x24
+
+#define TX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel) * 0x68) + 0x49340)
+#define TX_SPB_CTRL_ENABLE 0x0
+#define TX_SPB_CTRL_ENABLE_EN BIT(0)
+#define TX_SPB_CTRL_XF_CTRL2 0x20
+#define TX_SPB_CTRL_XF_BID_SHIFT 16
+
+#define TX_SPB_TOP_OFFSET(intf) \
+ ((((intf)->channel) * 0x1c) + 0x4a0e0)
+#define TX_SPB_TOP_BLKOUT 0x0
+#define TX_SPB_TOP_SPRE_BW_CTRL 0x4
+
+#define TX_EPKT_C_OFFSET(intf) \
+ ((((intf)->channel) * 0x120) + 0x40900)
+#define TX_EPKT_C_CFG_MISC 0x0
+#define TX_EPKT_C_CFG_MISC_EN BIT(0)
+#define TX_EPKT_C_CFG_MISC_PT BIT(1)
+#define TX_EPKT_C_CFG_MISC_PS_SHIFT 14
+#define TX_EPKT_C_CFG_MISC_FD_SHIFT 20
+
+#define TX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel) * 0xc) + 0x49a20)
+#define TX_PAUSE_MAP_VECTOR 0x8
+
+#define RX_EDPKT_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x38) + 0x9ca00)
+#define RX_EDPKT_DMA_WRITE 0x00
+#define RX_EDPKT_DMA_READ 0x08
+#define RX_EDPKT_DMA_BASE 0x10
+#define RX_EDPKT_DMA_END 0x18
+#define RX_EDPKT_DMA_VALID 0x20
+#define RX_EDPKT_DMA_FULLNESS 0x28
+#define RX_EDPKT_DMA_MIN_THRES 0x2c
+#define RX_EDPKT_DMA_CH_XONOFF 0x30
+
+#define RX_EDPKT_CFG_OFFSET(intf) \
+ ((((intf)->channel) * 0x70) + 0x9c600)
+#define RX_EDPKT_CFG_CFG0 0x0
+#define RX_EDPKT_CFG_CFG0_DBUF_SHIFT 9
+#define RX_EDPKT_CFG_CFG0_RBUF 0x0
+#define RX_EDPKT_CFG_CFG0_RBUF_4K 0x1
+#define RX_EDPKT_CFG_CFG0_BUF_4K 0x2
+/* EFRM stuffing: 0 = no byte stuffing, 1 = two-byte stuffing */
+#define RX_EDPKT_CFG_CFG0_EFRM_STUF BIT(11)
+#define RX_EDPKT_CFG_CFG0_BALN_SHIFT 12
+#define RX_EDPKT_CFG_CFG0_NO_ALN 0
+#define RX_EDPKT_CFG_CFG0_4_ALN 2
+#define RX_EDPKT_CFG_CFG0_64_ALN 6
+#define RX_EDPKT_RING_BUFFER_WRITE 0x38
+#define RX_EDPKT_RING_BUFFER_READ 0x40
+#define RX_EDPKT_RING_BUFFER_BASE 0x48
+#define RX_EDPKT_RING_BUFFER_END 0x50
+#define RX_EDPKT_RING_BUFFER_VALID 0x58
+#define RX_EDPKT_CFG_ENABLE 0x6c
+#define RX_EDPKT_CFG_ENABLE_EN BIT(0)
+
+#define RX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0xa0000)
+#define RX_SPB_DMA_READ 0x00
+#define RX_SPB_DMA_BASE 0x08
+#define RX_SPB_DMA_END 0x10
+#define RX_SPB_DMA_VALID 0x18
+#define RX_SPB_DMA_FIFO_CTRL 0x20
+#define RX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define RX_SPB_DMA_FIFO_STATUS 0x24
+
+#define RX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x68) + 0xa1000)
+#define RX_SPB_CTRL_ENABLE 0x00
+#define RX_SPB_CTRL_ENABLE_EN BIT(0)
+
+#define RX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x4) + 0xa1138)
+#define RX_PAUSE_MAP_VECTOR 0x00
+
+#define RX_SPB_TOP_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x14) + 0xa2000)
+#define RX_SPB_TOP_BLKOUT 0x00
+
+#define NUM_4K_BUFFERS 32
+#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS)
+
+#define DESC_RING_COUNT (64 * NUM_4K_BUFFERS)
+#define DESC_SIZE 16
+#define DESC_RING_SIZE (DESC_RING_COUNT * DESC_SIZE)
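+
+/* Sizing (assuming 4 KiB pages): the RX ring buffer is 32 * 4 KiB =
+ * 128 KiB; each descriptor ring is (64 * 32) = 2048 entries * 16 B =
+ * 32 KiB.
+ */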
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 969591bbc066..888f28f11406 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -31,6 +31,7 @@
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -196,28 +197,6 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
return 0;
}
-static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
-{
- u32 val;
-
- bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
- (index << CAM_CTRL_INDEX_SHIFT)));
-
- b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
-
- val = br32(bp, B44_CAM_DATA_LO);
-
- data[2] = (val >> 24) & 0xFF;
- data[3] = (val >> 16) & 0xFF;
- data[4] = (val >> 8) & 0xFF;
- data[5] = (val >> 0) & 0xFF;
-
- val = br32(bp, B44_CAM_DATA_HI);
-
- data[0] = (val >> 8) & 0xFF;
- data[1] = (val >> 0) & 0xFF;
-}
-
static inline void __b44_cam_write(struct b44 *bp,
const unsigned char *data, int index)
{
@@ -597,7 +576,7 @@ static void b44_check_phy(struct b44 *bp)
static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = from_timer(bp, t, timer);
+ struct b44 *bp = timer_container_of(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1064,13 +1043,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
@@ -1650,7 +1629,7 @@ static int b44_close(struct net_device *dev)
napi_disable(&bp->napi);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -1680,7 +1659,7 @@ static void b44_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
/* Convert HW stats into rtnl_link_stats64 stats. */
nstat->rx_packets = hwstat->rx_pkts;
@@ -1714,7 +1693,7 @@ static void b44_get_stats64(struct net_device *dev,
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
@@ -1790,13 +1769,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -1815,11 +1794,9 @@ static int b44_nway_reset(struct net_device *dev)
b44_readphy(bp, MII_BMCR, &bmcr);
b44_readphy(bp, MII_BMCR, &bmcr);
r = -EINVAL;
- if (bmcr & BMCR_ANENABLE) {
- b44_writephy(bp, MII_BMCR,
- bmcr | BMCR_ANRESTART);
- r = 0;
- }
+ if (bmcr & BMCR_ANENABLE)
+ r = b44_writephy(bp, MII_BMCR,
+ bmcr | BMCR_ANRESTART);
spin_unlock_irq(&bp->lock);
return r;
@@ -1961,7 +1938,9 @@ static int b44_set_link_ksettings(struct net_device *dev,
}
static void b44_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -1972,7 +1951,9 @@ static void b44_get_ringparam(struct net_device *dev,
}
static int b44_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -2029,12 +2010,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
@@ -2078,12 +2061,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
do {
data_src = &hwstat->tx_good_octets;
data_dst = data;
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data_dst++ = *data_src++;
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2251,7 +2234,6 @@ static int b44_register_phy_one(struct b44 *bp)
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
- char bus_id[MII_BUS_ID_SIZE + 3];
struct ssb_sprom *sprom = &sdev->bus->sprom;
int err;
@@ -2278,27 +2260,26 @@ static int b44_register_phy_one(struct b44 *bp)
goto err_out_mdiobus;
}
- if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
- (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
-
+ phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr);
+ if (!phydev &&
+ sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) {
dev_info(sdev->dev,
"could not find PHY at %i, use fixed one\n",
bp->phy_addr);
- bp->phy_addr = 0;
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
- bp->phy_addr);
- } else {
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
- bp->phy_addr);
+ phydev = fixed_phy_register_100fd();
+ if (!IS_ERR(phydev))
+ bp->phy_addr = phydev->mdio.addr;
}
- phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev))
+ err = -ENODEV;
+ else
+ err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
dev_err(sdev->dev, "could not attach PHY at %i\n",
bp->phy_addr);
- err = PTR_ERR(phydev);
goto err_out_mdiobus_unregister;
}
@@ -2311,7 +2292,6 @@ static int b44_register_phy_one(struct b44 *bp)
linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
- bp->phy_addr = phydev->mdio.addr;
phy_attached_info(phydev);
@@ -2329,10 +2309,15 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
- struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
+ struct net_device *dev = bp->dev;
+ struct phy_device *phydev;
+
+ phydev = dev->phydev;
- phy_disconnect(dev->phydev);
+ phy_disconnect(phydev);
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
@@ -2371,7 +2356,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
@@ -2491,7 +2476,7 @@ static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
spin_lock_irq(&bp->lock);
@@ -2588,7 +2573,7 @@ static int __init b44_init(void)
unsigned int dma_desc_align_size = dma_get_cache_alignment();
int err;
- /* Setup paramaters for syncing RX/TX DMA descriptors */
+ /* Setup parameters for syncing RX/TX DMA descriptors */
dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
err = b44_pci_init();
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 7cc5213c575a..203e8d0dd04b 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -36,13 +36,24 @@
#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
ETH_FCS_LEN + 4) /* 32 */
+#define ENET_RX_SKB_BUF_SIZE (NET_SKB_PAD + NET_IP_ALIGN + \
+ ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ENET_MTU_MAX + ETH_FCS_LEN + 4)
+#define ENET_RX_SKB_BUF_ALLOC_SIZE (SKB_DATA_ALIGN(ENET_RX_SKB_BUF_SIZE) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define ENET_RX_BUF_DMA_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define ENET_RX_BUF_DMA_SIZE (ENET_RX_SKB_BUF_SIZE - ENET_RX_BUF_DMA_OFFSET)
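+/* NET_SKB_PAD + NET_IP_ALIGN headroom is left for build_skb(); only the
+ * remainder of the fragment is DMA-mapped for the hardware to fill.
+ */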
+
struct bcm4908_enet_dma_ring_bd {
__le32 ctl;
__le32 addr;
} __packed;
struct bcm4908_enet_dma_ring_slot {
- struct sk_buff *skb;
+ union {
+ void *buf; /* RX */
+ struct sk_buff *skb; /* TX */
+ };
unsigned int len;
dma_addr_t dma_addr;
};
@@ -260,22 +271,21 @@ static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int
u32 tmp;
int err;
- slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
-
- slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
- if (!slot->skb)
+ slot->buf = napi_alloc_frag(ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (!slot->buf)
return -ENOMEM;
- slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ slot->dma_addr = dma_map_single(dev, slot->buf + ENET_RX_BUF_DMA_OFFSET,
+ ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
err = dma_mapping_error(dev, slot->dma_addr);
if (err) {
dev_err(dev, "Failed to map DMA buffer: %d\n", err);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
return err;
}
- tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp = ENET_RX_BUF_DMA_SIZE << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
tmp |= DMA_CTL_STATUS_OWN;
if (idx == enet->rx_ring.length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
@@ -315,11 +325,11 @@ static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
for (i = rx_ring->length - 1; i >= 0; i--) {
slot = &rx_ring->slots[i];
- if (!slot->skb)
+ if (!slot->buf)
continue;
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
}
}
@@ -495,6 +505,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
netif_carrier_off(netdev);
napi_disable(&rx_ring->napi);
napi_disable(&tx_ring->napi);
+ netdev_reset_queue(netdev);
bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
@@ -507,7 +518,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -554,6 +565,8 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
if (ring->write_idx + 1 == ring->length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
+ netdev_sent_queue(enet->netdev, skb->len);
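+ /* BQL accounting; paired with netdev_completed_queue() in
+ * bcm4908_enet_poll_tx()
+ */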
+
buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
buf_desc->ctl = cpu_to_le32(tmp);
@@ -561,8 +574,6 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
if (++ring->write_idx == ring->length - 1)
ring->write_idx = 0;
- enet->netdev->stats.tx_bytes += skb->len;
- enet->netdev->stats.tx_packets++;
return NETDEV_TX_OK;
}
@@ -577,6 +588,7 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
while (handled < weight) {
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot slot;
+ struct sk_buff *skb;
u32 ctl;
int len;
int err;
@@ -600,16 +612,24 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
if (len < ETH_ZLEN ||
(ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
- kfree_skb(slot.skb);
+ skb_free_frag(slot.buf);
enet->netdev->stats.rx_dropped++;
break;
}
- dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, slot.dma_addr, ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
+
+ skb = build_skb(slot.buf, ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ skb_free_frag(slot.buf);
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, ENET_RX_BUF_DMA_OFFSET);
+ skb_put(skb, len - ETH_FCS_LEN);
+ skb->protocol = eth_type_trans(skb, enet->netdev);
- skb_put(slot.skb, len - ETH_FCS_LEN);
- slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
- netif_receive_skb(slot.skb);
+ netif_receive_skb(skb);
enet->netdev->stats.rx_packets++;
enet->netdev->stats.rx_bytes += len;
@@ -646,13 +666,18 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
+
+ handled++;
bytes += slot->len;
+
if (++tx_ring->read_idx == tx_ring->length)
tx_ring->read_idx = 0;
-
- handled++;
}
+ netdev_completed_queue(enet->netdev, handled, bytes);
+ enet->netdev->stats.tx_packets += handled;
+ enet->netdev->stats.tx_bytes += bytes;
+
if (handled < weight) {
napi_complete_done(napi, handled);
bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
@@ -708,7 +733,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
enet->irq_tx = platform_get_irq_byname(pdev, "tx");
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
err = bcm4908_enet_dma_alloc(enet);
if (err)
@@ -716,27 +743,32 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
netdev->min_mtu = ETH_ZLEN;
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
- netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
-static int bcm4908_enet_remove(struct platform_device *pdev)
+static void bcm4908_enet_remove(struct platform_device *pdev)
{
struct bcm4908_enet *enet = platform_get_drvdata(pdev);
@@ -744,8 +776,6 @@ static int bcm4908_enet_remove(struct platform_device *pdev)
netif_napi_del(&enet->rx_ring.napi);
netif_napi_del(&enet->tx_ring.napi);
bcm4908_enet_dma_free(enet);
-
- return 0;
}
static const struct of_device_id bcm4908_enet_of_match[] = {
@@ -763,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a568994a03a6..92204fea1f08 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -286,7 +286,7 @@ static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
*/
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
@@ -388,7 +388,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
- skb = build_skb(buf, priv->rx_frag_size);
+ skb = napi_build_skb(buf, priv->rx_frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
dev->stats.rx_dropped++;
@@ -423,7 +423,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
struct bcm_enet_priv *priv;
unsigned int bytes;
@@ -468,7 +468,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
dev->stats.tx_errors++;
bytes += skb->len;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, budget);
released++;
}
@@ -499,7 +499,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
@@ -1195,7 +1195,7 @@ static int bcm_enet_stop(struct net_device *dev)
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_writel(priv, 0, ENET_IRMASK_REG);
@@ -1211,7 +1211,7 @@ static int bcm_enet_stop(struct net_device *dev)
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev,
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev,
}
}
-static void bcm_enet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
}
static int bcm_enet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -1647,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1711,17 +1716,17 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_irq, *res_irq_rx, *res_irq_tx;
+ int irq, irq_rx, irq_tx;
struct mii_bus *bus;
int i, ret;
if (!bcm_enet_shared_base[0])
return -EPROBE_DEFER;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_irq || !res_irq_rx || !res_irq_tx)
+ irq = platform_get_irq(pdev, 0);
+ irq_rx = platform_get_irq(pdev, 1);
+ irq_tx = platform_get_irq(pdev, 2);
+ if (irq < 0 || irq_rx < 0 || irq_tx < 0)
return -ENODEV;
dev = alloc_etherdev(sizeof(*priv));
@@ -1743,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
goto out;
}
- dev->irq = priv->irq = res_irq->start;
- priv->irq_rx = res_irq_rx->start;
- priv->irq_tx = res_irq_tx->start;
+ dev->irq = priv->irq = irq;
+ priv->irq_rx = irq_rx;
+ priv->irq_tx = irq_tx;
priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
@@ -1854,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enet_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops;
/* MTU range: 46 - 2028 */
@@ -1897,7 +1902,7 @@ out:
/*
* exit func, stops hardware and unregisters netdevice
*/
-static int bcm_enet_remove(struct platform_device *pdev)
+static void bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -1927,15 +1932,13 @@ static int bcm_enet_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove = bcm_enet_remove,
+ .remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
- .owner = THIS_MODULE,
},
};
@@ -1998,7 +2001,7 @@ static inline int bcm_enet_port_is_rgmii(int portid)
*/
static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
@@ -2343,10 +2346,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
- del_timer_sync(&priv->swphy_poll);
+ timer_delete_sync(&priv->swphy_poll);
netif_stop_queue(dev);
napi_disable(&priv->napi);
- del_timer_sync(&priv->rx_timeout);
+ timer_delete_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
@@ -2357,7 +2360,7 @@ static int bcm_enetsw_stop(struct net_device *dev)
bcm_enet_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -2500,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enetsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enetsw_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -2527,8 +2530,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev,
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
@@ -2579,8 +2582,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
}
}
-static void bcm_enetsw_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enetsw_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -2595,8 +2601,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev,
ering->tx_pending = priv->tx_ring_size;
}
-static int bcm_enetsw_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static int
+bcm_enetsw_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -2703,7 +2712,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2729,7 +2738,7 @@ out:
/* exit func, stops hardware and unregisters netdevice */
-static int bcm_enetsw_remove(struct platform_device *pdev)
+static void bcm_enetsw_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -2742,15 +2751,13 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove = bcm_enetsw_remove,
+ .remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
- .owner = THIS_MODULE,
},
};
@@ -2773,20 +2780,13 @@ static int bcm_enet_shared_probe(struct platform_device *pdev)
return 0;
}
-static int bcm_enet_shared_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
/* this "shared" driver is needed because both macs share a single
* address space
*/
struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
- .remove = bcm_enet_shared_remove,
.driver = {
.name = "bcm63xx_enet_shared",
- .owner = THIS_MODULE,
},
};
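
An aside on the timer conversions in the bcm63xx_enet hunks above: from_timer() was renamed timer_container_of() and del_timer_sync() became timer_delete_sync(), with no behavioural change. A minimal sketch of the whole pattern, using hypothetical demo_* names, looks like this:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* Hypothetical private struct embedding the timer, as bcm_enet_priv does. */
    struct demo_priv {
        struct timer_list poll_timer;
        int polls;
    };

    static void demo_poll(struct timer_list *t)
    {
        /* Recover the containing struct from the timer_list pointer. */
        struct demo_priv *priv = timer_container_of(priv, t, poll_timer);

        priv->polls++;
        mod_timer(&priv->poll_timer, jiffies + HZ);    /* re-arm in 1s */
    }

    static void demo_start(struct demo_priv *priv)
    {
        timer_setup(&priv->poll_timer, demo_poll, 0);
        mod_timer(&priv->poll_timer, jiffies + HZ);
    }

    static void demo_stop(struct demo_priv *priv)
    {
        /* Waits for a running callback to finish before returning. */
        timer_delete_sync(&priv->poll_timer);
    }

Note that timer_delete_sync() can deadlock if called while holding a lock the callback also takes, which is why drivers call it from process context in their stop paths, as bcm_enetsw_stop() does above.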
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 40933bf5a710..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -27,30 +27,6 @@
#include "bcmsysport.h"
-/* I/O accessors register helpers */
-#define BCM_SYSPORT_IO_MACRO(name, offset) \
-static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
-{ \
- u32 reg = readl_relaxed(priv->base + offset + off); \
- return reg; \
-} \
-static inline void name##_writel(struct bcm_sysport_priv *priv, \
- u32 val, u32 off) \
-{ \
- writel_relaxed(val, priv->base + offset + off); \
-} \
-
-BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
-BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
-BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
-BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
-BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
-
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
* same layout, except it has been moved by 4 bytes up, *sigh*
*/
@@ -295,6 +271,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+ /* RDMA misc statistics */
+ STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR),
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
@@ -308,8 +286,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -333,6 +311,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
case BCM_SYSPORT_STAT_NETDEV64:
case BCM_SYSPORT_STAT_RXCHK:
case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_RDMA:
case BCM_SYSPORT_STAT_SOFT:
return true;
default:
@@ -367,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
- char buf[128];
- int i, j;
+ int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
s = &bcm_sysport_gstrings_stats[i];
if (priv->is_lite &&
!bcm_sysport_lite_stat_valid(s->type))
continue;
- memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&data, s->stat_string);
}
for (i = 0; i < dev->num_tx_queues; i++) {
- snprintf(buf, sizeof(buf), "txq%d_packets", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
-
- snprintf(buf, sizeof(buf), "txq%d_bytes", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_sprintf(&data, "txq%d_packets", i);
+ ethtool_sprintf(&data, "txq%d_bytes", i);
}
break;
default:
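
The string-table conversions above replace open-coded memcpy() into fixed ETH_GSTRING_LEN slots with ethtool_puts() and ethtool_sprintf(), which copy one string and advance the output cursor themselves, so the manual index bookkeeping (the old j counter) disappears. A reduced sketch, with hypothetical demo_* names:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static const char demo_gstrings[][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes",
    };

    static void demo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
    {
        int i;

        if (stringset != ETH_SS_STATS)
            return;

        /* Each helper copies one string and bumps 'data' by ETH_GSTRING_LEN. */
        for (i = 0; i < ARRAY_SIZE(demo_gstrings); i++)
            ethtool_puts(&data, demo_gstrings[i]);

        for (i = 0; i < dev->num_tx_queues; i++) {
            ethtool_sprintf(&data, "txq%d_packets", i);
            ethtool_sprintf(&data, "txq%d_bytes", i);
        }
    }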
@@ -436,6 +405,14 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
if (val == ~0)
rbuf_writel(priv, 0, s->reg_offset);
break;
+ case BCM_SYSPORT_STAT_RDMA:
+ if (!priv->is_lite)
+ continue;
+
+ val = rdma_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rdma_writel(priv, 0, s->reg_offset);
+ break;
}
j += s->stat_sizeof;
@@ -457,10 +434,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
for (q = 0; q < priv->netdev->num_tx_queues; q++) {
ring = &priv->tx_rings[q];
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
bytes = ring->bytes;
packets = ring->packets;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
*tx_bytes += bytes;
*tx_packets += packets;
@@ -504,9 +481,9 @@ static void bcm_sysport_get_stats(struct net_device *dev,
if (s->stat_sizeof == sizeof(u64) &&
s->type == BCM_SYSPORT_STAT_NETDEV64) {
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
data[i] = *(u64 *)p;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
} else
data[i] = *(u32 *)p;
j++;
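
The u64_stats hunks drop the _irq-suffixed fetch helpers, which were removed once the seqcount-based readers no longer needed to disable interrupts. The writer/reader pairing looks like this (hypothetical demo_* names; on 64-bit the sync object compiles away to nothing):

    #include <linux/u64_stats_sync.h>

    struct demo_ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;    /* seqcount on 32-bit, no-op on 64-bit */
    };

    /* Writer side: a single updater per ring, e.g. the NAPI poll loop. */
    static void demo_stats_add(struct demo_ring_stats *s, unsigned int len)
    {
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
    }

    /* Reader side: retry until a consistent snapshot is observed. */
    static void demo_stats_read(struct demo_ring_stats *s, u64 *p, u64 *b)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&s->syncp);
            *p = s->packets;
            *b = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
    }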
@@ -1042,7 +1019,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
if (priv->dim.use_dim) {
dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
priv->dim.bytes, &dim_sample);
- net_dim(&priv->dim.dim, dim_sample);
+ net_dim(&priv->dim.dim, &dim_sample);
}
return work_done;
@@ -1309,11 +1286,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct bcm_sysport_tx_ring *ring;
+ unsigned long flags, desc_flags;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 len_status, addr_lo;
unsigned int skb_len;
- unsigned long flags;
dma_addr_t mapping;
u16 queue;
int ret;
@@ -1348,6 +1325,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
+ dev_kfree_skb_any(skb);
goto out;
}
@@ -1373,8 +1351,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->desc_count--;
/* Ports are latched, so write upper address first */
+ spin_lock_irqsave(&priv->desc_lock, desc_flags);
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+ spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
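
The new desc_lock above exists because TDMA_WRITE_PORT_HI/LO are a latched pair: the HI write only takes effect when the LO write follows, so two CPUs transmitting on different queues must not interleave their halves. The general shape of the fix, sketched with hypothetical register names:

    #include <linux/spinlock.h>
    #include <linux/io.h>

    struct demo_hw {
        void __iomem *base;
        spinlock_t pair_lock;    /* serializes latched HI/LO writes */
    };

    #define DEMO_PORT_HI 0x00    /* hypothetical offsets */
    #define DEMO_PORT_LO 0x04

    static void demo_write_desc(struct demo_hw *hw, u32 hi, u32 lo)
    {
        unsigned long flags;

        /*
         * The HI value is latched by the hardware and only committed by
         * the LO write, so the two stores must not interleave with
         * another writer's pair.
         */
        spin_lock_irqsave(&hw->pair_lock, flags);
        writel(hi, hw->base + DEMO_PORT_HI);
        writel(lo, hw->base + DEMO_PORT_LO);
        spin_unlock_irqrestore(&hw->pair_lock, flags);
    }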
@@ -1515,7 +1495,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* Initialize SW view of the ring */
spin_lock_init(&ring->lock);
ring->priv = priv;
- netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
ring->index = index;
ring->size = size;
ring->clean_index = 0;
@@ -1876,10 +1856,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
&stats->tx_packets);
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
stats->rx_packets = stats64->rx_packets;
stats->rx_bytes = stats64->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
@@ -1953,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
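
Several hunks in this file stop ignoring the return value of clk_prepare_enable(), which can fail and previously left the open path continuing without a running clock. The usual shape, sketched with a hypothetical device:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int demo_power_up(struct device *dev, struct clk *clk)
    {
        int ret;

        ret = clk_prepare_enable(clk);    /* can fail; must be checked */
        if (ret) {
            dev_err(dev, "failed to enable clock: %d\n", ret);
            return ret;
        }

        /* ... hardware init that needs the clock ticking ... */

        return 0;
    }

Every success path that can later fail then needs a matching clk_disable_unprepare() on its unwind path, as the out_clk_disable label in bcm_sysport_open() provides.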
@@ -1989,6 +1973,9 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_clk_disable;
}
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
+
/* Reset house keeping link status */
priv->old_duplex = -1;
priv->old_link = -1;
@@ -2013,6 +2000,7 @@ static int bcm_sysport_open(struct net_device *dev)
}
/* Initialize both hardware and software ring */
+ spin_lock_init(&priv->desc_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = bcm_sysport_init_tx_ring(priv, i);
if (ret) {
@@ -2177,13 +2165,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
return -EOPNOTSUPP;
- /* All filters are already in use, we cannot match more rules */
- if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
- RXCHK_BRCM_TAG_MAX)
- return -ENOSPC;
-
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index >= RXCHK_BRCM_TAG_MAX)
+ /* All filters are already in use, we cannot match more rules */
return -ENOSPC;
/* Location is the classification ID, and index is the position
@@ -2417,7 +2401,7 @@ static int bcm_sysport_netdevice_event(struct notifier_block *nb,
if (dev->netdev_ops != &bcm_sysport_netdev_ops)
return NOTIFY_DONE;
- if (!dsa_slave_dev_check(info->upper_dev))
+ if (!dsa_user_dev_check(info->upper_dev))
return NOTIFY_DONE;
if (info->linking)
@@ -2518,9 +2502,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->irq0 = platform_get_irq(pdev, 0);
if (!priv->is_lite) {
priv->irq1 = platform_get_irq(pdev, 1);
- priv->wol_irq = platform_get_irq(pdev, 2);
+ priv->wol_irq = platform_get_irq_optional(pdev, 2);
} else {
- priv->wol_irq = platform_get_irq(pdev, 1);
+ priv->wol_irq = platform_get_irq_optional(pdev, 1);
}
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
ret = -EINVAL;
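
Switching the wake-up interrupt to platform_get_irq_optional() (and, further down, devm_clk_get_optional() for the WoL clock) reflects that these resources are genuinely optional: the _optional variants do not log an error when the resource is simply absent. A sketch with assumed resource names:

    #include <linux/platform_device.h>
    #include <linux/clk.h>

    static int demo_get_optional(struct platform_device *pdev)
    {
        struct clk *wol_clk;
        int wol_irq;

        /* Returns a negative errno, without logging, if the IRQ is absent. */
        wol_irq = platform_get_irq_optional(pdev, 2);

        /* Returns NULL (not an error) when the clock is not described. */
        wol_clk = devm_clk_get_optional(&pdev->dev, "wol");
        if (IS_ERR(wol_clk))
            return PTR_ERR(wol_clk);

        if (wol_irq > 0)
            dev_info(&pdev->dev, "wake-up IRQ %d available\n", wol_irq);

        return 0;
    }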
@@ -2565,7 +2549,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2582,8 +2566,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
@@ -2609,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2623,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2633,7 +2625,7 @@ err_free_netdev:
return ret;
}
-static int bcm_sysport_remove(struct platform_device *pdev)
+static void bcm_sysport_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2648,8 +2640,6 @@ static int bcm_sysport_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(dn);
free_netdev(dev);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
@@ -2794,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
@@ -2886,7 +2881,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove = bcm_sysport_remove,
+ .remove = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 984f76e74b43..a34296f989f1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -290,6 +290,7 @@ struct bcm_rsb {
#define RDMA_WRITE_PTR_HI 0x1010
#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_OVFL_DISC_CNTR 0x1018
#define RDMA_PROD_INDEX 0x1018
#define RDMA_PROD_INDEX_MASK 0xffff
@@ -484,7 +485,7 @@ struct bcm_rsb {
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
-#define SP_LT_NUM_HW_RX_DESC_WORDS 256
+#define SP_LT_NUM_HW_RX_DESC_WORDS 512
/* Internal linked-list RAM size */
#define SP_NUM_TX_DESC 1536
@@ -565,6 +566,7 @@ struct bcm_sysport_mib {
u32 rxchk_other_pkt_disc;
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
+ u32 rdma_ovflow_cnt;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
@@ -581,6 +583,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_RDMA,
BCM_SYSPORT_STAT_SOFT,
};
@@ -627,6 +630,14 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+#define STAT_RDMA(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RDMA, \
+ .reg_offset = ofs, \
+}
+
/* TX bytes and packets */
#define NUM_SYSPORT_TXQ_STAT 2
@@ -711,6 +722,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
+ spinlock_t desc_lock;
struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
@@ -761,4 +773,27 @@ struct bcm_sysport_priv {
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
#endif /* __BCM_SYSPORT_H */
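
Moving BCM_SYSPORT_IO_MACRO into the header makes the accessors available to other translation units, and the added parentheses around (offset) guard against macro arguments that are expressions. For reference, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) expands to approximately:

    static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
    {
        return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
    }

    static inline void umac_writel(struct bcm_sysport_priv *priv,
                                   u32 val, u32 off)
    {
        writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
    }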
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 086739e4f40a..50b8e97a811d 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
np = of_get_child_by_name(core->dev.of_node, "mdio");
err = of_mdiobus_register(mii_bus, np);
+ of_node_put(np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
@@ -259,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
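
The of_node_put() added above fixes a node refcount leak: of_get_child_by_name() returns its node with an elevated refcount, which must be dropped whether or not registration succeeds, and of_node_put(NULL) is a safe no-op. The pattern in isolation, with a hypothetical helper:

    #include <linux/of.h>
    #include <linux/of_mdio.h>

    static int demo_register_mdio(struct device *dev, struct mii_bus *bus)
    {
        struct device_node *np;
        int err;

        np = of_get_child_by_name(dev->of_node, "mdio"); /* +1 ref, or NULL */
        err = of_mdiobus_register(bus, np);
        of_node_put(np);    /* always drop the reference; NULL is a no-op */
        return err;
    }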
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index e6f48786949c..36f9bad28e6a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
@@ -363,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,7 +171,9 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +210,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
@@ -227,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
@@ -237,13 +249,11 @@ static int bgmac_probe(struct platform_device *pdev)
return bgmac_enet_probe(bgmac);
}
-static int bgmac_remove(struct platform_device *pdev)
+static void bgmac_remove(struct platform_device *pdev)
{
struct bgmac *bgmac = platform_get_drvdata(pdev);
bgmac_enet_remove(bgmac);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -291,4 +301,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 7b525c65bacb..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
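
Swapping the order of netdev_sent_queue() and the ring->end update closes a race with byte queue limits: once ring->end advances, the TX-completion path may free the skb and report bytes to BQL that were never accounted as sent. The BQL pairing, sketched with hypothetical helpers:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* TX path: account the bytes *before* the descriptor becomes visible. */
    static void demo_tx_account(struct net_device *dev, struct sk_buff *skb)
    {
        netdev_sent_queue(dev, skb->len);
        /* ... only now publish the descriptor to the hardware ring ... */
    }

    /* Completion path: report what was actually freed in this batch. */
    static void demo_tx_complete(struct net_device *dev,
                                 unsigned int pkts, unsigned int bytes)
    {
        netdev_completed_queue(dev, pkts, bytes);
    }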
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
+ if (bgmac->in_init || !bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
bgmac_clk_enable(bgmac, flags);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
bgmac_idm_write(bgmac, BCMA_IOCTL,
bgmac_idm_read(bgmac, BCMA_IOCTL) &
~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
- bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bgmac_get_strings_stats[i].name);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
@@ -1395,8 +1394,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1447,10 +1446,10 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (!phy_dev || IS_ERR(phy_dev)) {
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
+ if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phy_dev);
}
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
@@ -1490,7 +1489,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
- bgmac_chip_intrs_off(bgmac);
+ bgmac->in_init = true;
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
@@ -1509,6 +1508,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
*/
bgmac_clk_enable(bgmac, 0);
+ bgmac_chip_intrs_off(bgmac);
+
/* This seems to be fixing IRQ by assigning OOB #6 to the core */
if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
@@ -1527,7 +1528,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
@@ -1542,6 +1543,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
/* Omit FCS from max MTU size */
net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ bgmac->in_init = false;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
@@ -1568,7 +1571,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
@@ -1623,4 +1625,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
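
With fixed_phy_register() now reporting failure only through ERR_PTR-encoded pointers, never NULL, the old "!phy_dev || IS_ERR(phy_dev)" double test collapses to IS_ERR(), and the real errno can be propagated with PTR_ERR() instead of a blanket -ENODEV. The convention in miniature, with a hypothetical allocator:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_obj { int id; };

    static struct demo_obj *demo_obj_get(void)
    {
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return ERR_PTR(-ENOMEM);    /* encode the errno in the pointer */
        return obj;
    }

    static int demo_use(void)
    {
        struct demo_obj *obj = demo_obj_get();

        /* Callees that return ERR_PTR never return NULL on failure. */
        if (IS_ERR(obj))
            return PTR_ERR(obj);

        kfree(obj);
        return 0;
    }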
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 110088e662ea..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -364,8 +363,6 @@
#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
-#define BGMAC_WEIGHT 64
-
#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
@@ -474,6 +471,8 @@ struct bgmac {
int irq;
u32 int_mask;
+ bool in_init;
+
/* Current MAC state */
int mac_speed;
int mac_duplex;
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile
new file mode 100644
index 000000000000..ea6596854e5c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BNGE) += bng_en.o
+
+bng_en-y := bnge_core.o \
+ bnge_devlink.o \
+ bnge_hwrm.o \
+ bnge_hwrm_lib.o \
+ bnge_rmem.o \
+ bnge_resc.o \
+ bnge_netdev.o \
+ bnge_ethtool.o \
+ bnge_auxr.o
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
new file mode 100644
index 000000000000..411744894349
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_H_
+#define _BNGE_H_
+
+#define DRV_NAME "bng_en"
+#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+
+#include <linux/etherdevice.h>
+#include <linux/bnxt/hsi.h>
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+#include "bnge_auxr.h"
+
+#define DRV_VER_MAJ 1
+#define DRV_VER_MIN 15
+#define DRV_VER_UPD 1
+
+extern char bnge_driver_name[];
+
+enum board_idx {
+ BCM57708,
+};
+
+struct bnge_auxr_priv {
+ struct auxiliary_device aux_dev;
+ struct bnge_auxr_dev *auxr_dev;
+ int id;
+};
+
+struct bnge_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+enum {
+ BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0),
+ BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1),
+ BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2),
+ BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3),
+ BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4),
+ BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5),
+ BNGE_FW_CAP_PKG_VER = BIT_ULL(6),
+ BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8),
+ BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9),
+ BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10),
+ BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11),
+ BNGE_FW_CAP_HOT_RESET = BIT_ULL(12),
+ BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13),
+ BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14),
+ BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15),
+ BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16),
+ BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17),
+ BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18),
+ BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19),
+ BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20),
+ BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21),
+ BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22),
+ BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23),
+ BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24),
+ BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25),
+ BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26),
+};
+
+enum {
+ BNGE_EN_ROCE_V1 = BIT_ULL(0),
+ BNGE_EN_ROCE_V2 = BIT_ULL(1),
+ BNGE_EN_STRIP_VLAN = BIT_ULL(2),
+ BNGE_EN_SHARED_CHNL = BIT_ULL(3),
+ BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4),
+};
+
+#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2)
+
+enum {
+ BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0),
+ BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1),
+ BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2),
+ BNGE_RSS_CAP_RSS_TCAM = BIT(3),
+ BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4),
+ BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5),
+ BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6),
+ BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7),
+};
+
+#define BNGE_MAX_QUEUE 8
+struct bnge_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnge_dev {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ u64 dsn;
+#define BNGE_VPD_FLD_LEN 32
+ char board_partno[BNGE_VPD_FLD_LEN];
+ char board_serialno[BNGE_VPD_FLD_LEN];
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+
+ u16 chip_num;
+ u8 chip_rev;
+
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+ int db_offset; /* db_offset within db_size */
+ int db_size;
+
+ /* HWRM members */
+ u16 hwrm_cmd_seq;
+ u16 hwrm_cmd_kong_seq;
+ struct dma_pool *hwrm_dma_pool;
+ struct hlist_head hwrm_pending_list;
+ u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+ char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+
+ struct bnge_pf_info pf;
+
+ unsigned long state;
+#define BNGE_STATE_DRV_REGISTERED 0
+#define BNGE_STATE_OPEN 1
+
+ u64 fw_cap;
+
+ /* Backing stores */
+ struct bnge_ctx_mem_info *ctx;
+
+ u64 flags;
+
+ struct bnge_hw_resc hw_resc;
+
+ u16 tso_max_segs;
+
+ int max_fltr;
+#define BNGE_L2_FLTR_MAX_FLTR 1024
+
+ u32 *rss_indir_tbl;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+#define BNGE_MAX_RSS_TABLE_ENTRIES \
+ (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL)
+ u16 rss_indir_tbl_entries;
+
+ u32 rss_cap;
+ u32 rss_hash_cfg;
+
+ u16 rx_nr_rings;
+ u16 tx_nr_rings;
+ u16 tx_nr_rings_per_tc;
+ /* Number of NQs */
+ u16 nq_nr_rings;
+
+ /* Aux device resources */
+ u16 aux_num_msix;
+ u16 aux_num_stat_ctxs;
+
+ u16 max_mtu;
+#define BNGE_MAX_MTU 9500
+
+ u16 hw_ring_stats_size;
+#define BNGE_NUM_RX_RING_STATS 8
+#define BNGE_NUM_TX_RING_STATS 8
+#define BNGE_NUM_TPA_RING_STATS 6
+#define BNGE_RING_STATS_SIZE \
+ ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \
+ BNGE_NUM_TPA_RING_STATS) * 8)
+
+ u16 max_tpa_v2;
+#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2)
+
+ u8 num_tc;
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnge_queue_info q_info[BNGE_MAX_QUEUE];
+ u8 tc_to_qidx[BNGE_MAX_QUEUE];
+ u8 q_ids[BNGE_MAX_QUEUE];
+ u8 max_q;
+ u8 port_count;
+
+ struct bnge_irq *irq_tbl;
+ u16 irqs_acquired;
+
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+};
+
+static inline bool bnge_is_roce_en(struct bnge_dev *bd)
+{
+ return bd->flags & BNGE_EN_ROCE;
+}
+
+static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
+{
+ if (bd->netdev) {
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA ||
+ bn->priv_flags & BNGE_NET_EN_JUMBO)
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+ u32 idx)
+{
+ bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
+
+#endif /* _BNGE_H_ */
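
bnge_writeq() above guards against torn doorbells on 32-bit hosts, where writeq() is unavailable and lo_hi_writeq() issues two 32-bit stores. A stand-alone model of just that path (hypothetical demo_* names; the lock would be set up with spin_lock_init() during probe):

    #include <linux/io-64-nonatomic-lo-hi.h>
    #include <linux/spinlock.h>

    struct demo_db {
        void __iomem *addr;
        spinlock_t lock;    /* keeps the two 32-bit halves together */
    };

    static void demo_db_write(struct demo_db *db, u64 val)
    {
        /*
         * lo_hi_writeq() is two 32-bit stores (low word, then high word).
         * Unlocked, two CPUs could interleave their halves and the device
         * could latch lo(B) with hi(A), a torn doorbell value. The
         * spinlock rules that out; 64-bit hosts use a single writeq().
         */
        spin_lock(&db->lock);
        lo_hi_writeq(val, db->addr);
        spin_unlock(&db->lock);
    }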
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
new file mode 100644
index 000000000000..d64592b64e17
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_auxr.h"
+
+static DEFINE_IDA(bnge_aux_dev_ids);
+
+static void bnge_fill_msix_vecs(struct bnge_dev *bd,
+ struct bnge_msix_info *info)
+{
+ struct bnge_auxr_dev *auxr_dev = bd->auxr_dev;
+ int num_msix, i;
+
+ if (!auxr_dev->auxr_info->msix_requested) {
+ dev_warn(bd->dev, "Requested MSI-X vectors not allocated\n");
+ return;
+ }
+ num_msix = auxr_dev->auxr_info->msix_requested;
+ for (i = 0; i < num_msix; i++) {
+ info[i].vector = bd->irq_tbl[i].vector;
+ info[i].db_offset = bd->db_offset;
+ info[i].ring_idx = i;
+ }
+}
+
+int bnge_register_dev(struct bnge_auxr_dev *auxr_dev,
+ void *handle)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+ int rc = 0;
+
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (!bd->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ if (!bnge_aux_has_enough_resources(bd)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ auxr_info = auxr_dev->auxr_info;
+ auxr_info->handle = handle;
+
+ auxr_info->msix_requested = bd->aux_num_msix;
+
+ bnge_fill_msix_vecs(bd, bd->auxr_dev->msix_info);
+ auxr_dev->flags |= BNGE_ARDEV_MSIX_ALLOC;
+
+exit:
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_register_dev);
+
+void bnge_unregister_dev(struct bnge_auxr_dev *auxr_dev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct bnge_auxr_info *auxr_info;
+
+ auxr_info = auxr_dev->auxr_info;
+ netdev_lock(bd->netdev);
+ mutex_lock(&auxr_dev->auxr_dev_lock);
+ if (auxr_info->msix_requested)
+ auxr_dev->flags &= ~BNGE_ARDEV_MSIX_ALLOC;
+ auxr_info->msix_requested = 0;
+
+ mutex_unlock(&auxr_dev->auxr_dev_lock);
+ netdev_unlock(bd->netdev);
+}
+EXPORT_SYMBOL(bnge_unregister_dev);
+
+int bnge_send_msg(struct bnge_auxr_dev *auxr_dev, struct bnge_fw_msg *fw_msg)
+{
+ struct bnge_dev *bd = pci_get_drvdata(auxr_dev->pdev);
+ struct output *resp;
+ struct input *req;
+ u32 resp_len;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, 0 /* don't care */);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_req_replace(bd, req, fw_msg->msg, fw_msg->msg_len);
+ if (rc)
+ goto drop_req;
+
+ bnge_hwrm_req_timeout(bd, req, fw_msg->timeout);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ resp_len = le16_to_cpu(resp->resp_len);
+ if (resp_len) {
+ if (fw_msg->resp_max_len < resp_len)
+ resp_len = fw_msg->resp_max_len;
+
+ memcpy(fw_msg->resp, resp, resp_len);
+ }
+drop_req:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+EXPORT_SYMBOL(bnge_send_msg);
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bd)
+{
+ struct bnge_auxr_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!bd->aux_priv)
+ return;
+
+ aux_priv = bd->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_uninit(adev);
+}
+
+static void bnge_aux_dev_release(struct device *dev)
+{
+ struct bnge_auxr_priv *aux_priv =
+ container_of(dev, struct bnge_auxr_priv, aux_dev.dev);
+ struct bnge_dev *bd = pci_get_drvdata(aux_priv->auxr_dev->pdev);
+
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->auxr_dev->auxr_info);
+ bd->auxr_dev = NULL;
+ kfree(aux_priv->auxr_dev);
+ kfree(aux_priv);
+ bd->aux_priv = NULL;
+}
+
+void bnge_rdma_aux_device_del(struct bnge_dev *bd)
+{
+ if (!bd->auxr_dev)
+ return;
+
+ auxiliary_device_delete(&bd->aux_priv->aux_dev);
+}
+
+static void bnge_set_auxr_dev_info(struct bnge_auxr_dev *auxr_dev,
+ struct bnge_dev *bd)
+{
+ auxr_dev->pdev = bd->pdev;
+ auxr_dev->l2_db_size = bd->db_size;
+ auxr_dev->l2_db_size_nc = bd->db_size;
+ auxr_dev->l2_db_offset = bd->db_offset;
+ mutex_init(&auxr_dev->auxr_dev_lock);
+
+ if (bd->flags & BNGE_EN_ROCE_V1)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV1_SUPP;
+ if (bd->flags & BNGE_EN_ROCE_V2)
+ auxr_dev->flags |= BNGE_ARDEV_ROCEV2_SUPP;
+
+ auxr_dev->chip_num = bd->chip_num;
+ auxr_dev->hw_ring_stats_size = bd->hw_ring_stats_size;
+ auxr_dev->pf_port_id = bd->pf.port_id;
+ auxr_dev->en_state = bd->state;
+ auxr_dev->bar0 = bd->bar0;
+}
+
+void bnge_rdma_aux_device_add(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bd->auxr_dev)
+ return;
+
+ aux_dev = &bd->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ dev_warn(bd->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bd->flags &= ~BNGE_EN_ROCE;
+ }
+
+ bd->auxr_dev->net = bd->netdev;
+}
+
+void bnge_rdma_aux_device_init(struct bnge_dev *bd)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnge_auxr_info *auxr_info;
+ struct bnge_auxr_priv *aux_priv;
+ struct bnge_auxr_dev *auxr_dev;
+ int rc;
+
+ if (!bnge_is_roce_en(bd))
+ return;
+
+ aux_priv = kzalloc(sizeof(*aux_priv), GFP_KERNEL);
+ if (!aux_priv)
+ goto exit;
+
+ aux_priv->id = ida_alloc(&bnge_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
+ dev_warn(bd->dev, "ida alloc failed for aux device\n");
+ kfree(aux_priv);
+ goto exit;
+ }
+
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bd->pdev->dev;
+ aux_dev->dev.release = bnge_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnge_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
+ goto exit;
+ }
+ bd->aux_priv = aux_priv;
+
+ auxr_dev = kzalloc(sizeof(*auxr_dev), GFP_KERNEL);
+ if (!auxr_dev)
+ goto aux_dev_uninit;
+
+ aux_priv->auxr_dev = auxr_dev;
+
+ auxr_info = kzalloc(sizeof(*auxr_info), GFP_KERNEL);
+ if (!auxr_info)
+ goto aux_dev_uninit;
+
+ auxr_dev->auxr_info = auxr_info;
+ bd->auxr_dev = auxr_dev;
+ bnge_set_auxr_dev_info(auxr_dev, bd);
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bd->flags &= ~BNGE_EN_ROCE;
+}
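
The auxiliary-device calls in this file pair up strictly: auxiliary_device_init() arms the .release callback, auxiliary_device_add() publishes the device, and teardown is delete-then-uninit, with memory freed only from .release once the last reference drops. A reduced sketch under hypothetical demo_* names (the real code above also allocates the id from an IDA):

    #include <linux/auxiliary_bus.h>
    #include <linux/slab.h>

    struct demo_aux {
        struct auxiliary_device adev;
    };

    static void demo_aux_release(struct device *dev)
    {
        struct demo_aux *da = container_of(dev, struct demo_aux, adev.dev);

        kfree(da);    /* only safe here, after the last reference drops */
    }

    static int demo_aux_create(struct device *parent)
    {
        struct demo_aux *da = kzalloc(sizeof(*da), GFP_KERNEL);
        int rc;

        if (!da)
            return -ENOMEM;

        da->adev.name = "rdma";    /* assumed name, as in the code above */
        da->adev.id = 0;
        da->adev.dev.parent = parent;
        da->adev.dev.release = demo_aux_release;

        rc = auxiliary_device_init(&da->adev);
        if (rc) {
            kfree(da);    /* init failed: release() will not be called */
            return rc;
        }

        rc = auxiliary_device_add(&da->adev);
        if (rc)
            auxiliary_device_uninit(&da->adev);    /* frees via release() */

        return rc;
    }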
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
new file mode 100644
index 000000000000..6c5c15ef2b0a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_auxr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_AUXR_H_
+#define _BNGE_AUXR_H_
+
+#include <linux/auxiliary_bus.h>
+
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#define BNGE_MAX_ROCE_MSIX 64
+
+struct hwrm_async_event_cmpl;
+struct bnge;
+
+struct bnge_msix_info {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
+struct bnge_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnge_auxr_info {
+ void *handle;
+ u16 msix_requested;
+};
+
+enum {
+ BNGE_ARDEV_ROCEV1_SUPP = BIT(0),
+ BNGE_ARDEV_ROCEV2_SUPP = BIT(1),
+ BNGE_ARDEV_MSIX_ALLOC = BIT(2),
+};
+
+#define BNGE_ARDEV_ROCE_SUPP (BNGE_ARDEV_ROCEV1_SUPP | \
+ BNGE_ARDEV_ROCEV2_SUPP)
+
+struct bnge_auxr_dev {
+ struct net_device *net;
+ struct pci_dev *pdev;
+ void __iomem *bar0;
+
+ struct bnge_msix_info msix_info[BNGE_MAX_ROCE_MSIX];
+
+ u32 flags;
+
+ struct bnge_auxr_info *auxr_info;
+
+ /* Doorbell BAR size in bytes mapped by L2 driver. */
+ int l2_db_size;
+ /* Doorbell BAR size in bytes mapped as non-cacheable. */
+ int l2_db_size_nc;
+ /* Doorbell offset in bytes within l2_db_size_nc. */
+ int l2_db_offset;
+
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state;
+
+ u16 auxr_num_msix_vec;
+ u16 auxr_num_ctxs;
+
+ /* serialize auxr operations */
+ struct mutex auxr_dev_lock;
+};
+
+void bnge_rdma_aux_device_uninit(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_del(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_add(struct bnge_dev *bdev);
+void bnge_rdma_aux_device_init(struct bnge_dev *bdev);
+int bnge_register_dev(struct bnge_auxr_dev *adev,
+ void *handle);
+void bnge_unregister_dev(struct bnge_auxr_dev *adev);
+int bnge_send_msg(struct bnge_auxr_dev *adev, struct bnge_fw_msg *fw_msg);
+
+#endif /* _BNGE_AUXR_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
new file mode 100644
index 000000000000..c94e132bebc8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+
+char bnge_driver_name[] = DRV_NAME;
+
+static const struct {
+ char *name;
+} board_info[] = {
+ [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+};
+
+static const struct pci_device_id bnge_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, bnge_pci_tbl);
+
+static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "%s found at mem %lx\n", board_info[idx].name,
+ (long)pci_resource_start(pdev, 0));
+
+ pcie_print_link_status(pdev);
+}
+
+bool bnge_aux_registered(struct bnge_dev *bd)
+{
+ struct bnge_auxr_dev *ba_dev = bd->auxr_dev;
+
+ if (ba_dev && ba_dev->auxr_info->msix_requested)
+ return true;
+
+ return false;
+}
+
+static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info))
+ snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
+static int bnge_func_qcaps(struct bnge_dev *bd)
+{
+ int rc;
+
+ rc = bnge_hwrm_func_qcaps(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_hwrm_queue_qportcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_resc_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_func_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "query config failure rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_hwrm_vnic_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "vnic caps failure rc: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void bnge_fw_unregister_dev(struct bnge_dev *bd)
+{
+ /* Context memory may be freed only after unregistering with firmware */
+ bnge_hwrm_func_drv_unrgtr(bd);
+ bnge_free_ctx_mem(bd);
+}
+
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+ bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
+static int bnge_fw_register_dev(struct bnge_dev *bd)
+{
+ int rc;
+
+ bd->fw_cap = 0;
+ rc = bnge_hwrm_ver_get(bd);
+ if (rc) {
+ dev_err(bd->dev, "Get Version command failed rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_nvm_cfg_ver_get(bd);
+
+ rc = bnge_hwrm_func_reset(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to reset function rc: %d\n", rc);
+ return rc;
+ }
+
+ bnge_hwrm_fw_set_time(bd);
+
+ rc = bnge_hwrm_func_drv_rgtr(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_ctx_mem(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
+ goto err_func_unrgtr;
+ }
+
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
+ rc = -ENODEV;
+ goto err_func_unrgtr;
+ }
+
+ bnge_set_dflt_rss_hash_type(bd);
+
+ return 0;
+
+err_func_unrgtr:
+ bnge_fw_unregister_dev(bd);
+ return rc;
+}
+
+static void bnge_pci_disable(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+}
+
+static int bnge_pci_enable(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return rc;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto err_pci_disable;
+ }
+
+ rc = pci_request_regions(pdev, bnge_driver_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto err_pci_disable;
+ }
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+ pci_set_master(pdev);
+
+ return 0;
+
+err_pci_disable:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void bnge_unmap_bars(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ if (bd->bar1) {
+ pci_iounmap(pdev, bd->bar1);
+ bd->bar1 = NULL;
+ }
+
+ if (bd->bar0) {
+ pci_iounmap(pdev, bd->bar0);
+ bd->bar0 = NULL;
+ }
+}
+
+static void bnge_set_max_func_irqs(struct bnge_dev *bd,
+ unsigned int max_irqs)
+{
+ bd->hw_resc.max_irqs = max_irqs;
+}
+
+static int bnge_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+static int bnge_map_db_bar(struct bnge_dev *bd)
+{
+ if (!bd->db_size)
+ return -ENODEV;
+
+ bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size);
+ if (!bd->bar1)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned int max_irqs;
+ struct bnge_dev *bd;
+ int rc;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability missing, aborting\n");
+ return -ENODEV;
+ }
+
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
+ rc = bnge_pci_enable(pdev);
+ if (rc)
+ return rc;
+
+ bnge_print_device_info(pdev, ent->driver_data);
+
+ bd = bnge_devlink_alloc(pdev);
+ if (!bd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ rc = -ENOMEM;
+ goto err_pci_disable;
+ }
+
+ bd->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bd->bar0) {
+ dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n");
+ rc = -ENOMEM;
+ goto err_devl_free;
+ }
+
+ rc = bnge_init_hwrm_resources(bd);
+ if (rc)
+ goto err_bar_unmap;
+
+ rc = bnge_fw_register_dev(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc);
+ goto err_hwrm_cleanup;
+ }
+
+ bnge_devlink_register(bd);
+
+ max_irqs = bnge_get_max_irq(pdev);
+ bnge_set_max_func_irqs(bd, max_irqs);
+
+ bnge_aux_init_dflt_config(bd);
+
+ rc = bnge_net_init_dflt_config(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n",
+ rc);
+ goto err_fw_reg;
+ }
+
+ rc = bnge_map_db_bar(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n",
+ rc);
+ goto err_config_uninit;
+ }
+
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bd->db_lock);
+#endif
+
+ bnge_rdma_aux_device_init(bd);
+
+ rc = bnge_alloc_irqs(bd);
+ if (rc) {
+ dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
+ goto err_uninit_auxr;
+ }
+
+ rc = bnge_netdev_alloc(bd, max_irqs);
+ if (rc)
+ goto err_free_irq;
+
+ bnge_rdma_aux_device_add(bd);
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_free_irq:
+ bnge_free_irqs(bd);
+
+err_uninit_auxr:
+ bnge_rdma_aux_device_uninit(bd);
+
+err_config_uninit:
+ bnge_net_uninit_dflt_config(bd);
+
+err_fw_reg:
+ bnge_devlink_unregister(bd);
+ bnge_fw_unregister_dev(bd);
+
+err_hwrm_cleanup:
+ bnge_cleanup_hwrm_resources(bd);
+
+err_bar_unmap:
+ bnge_unmap_bars(pdev);
+
+err_devl_free:
+ bnge_devlink_free(bd);
+
+err_pci_disable:
+ bnge_pci_disable(pdev);
+ return rc;
+}
+
+static void bnge_remove_one(struct pci_dev *pdev)
+{
+ struct bnge_dev *bd = pci_get_drvdata(pdev);
+
+ bnge_rdma_aux_device_del(bd);
+
+ bnge_netdev_free(bd);
+
+ bnge_free_irqs(bd);
+
+ bnge_rdma_aux_device_uninit(bd);
+
+ bnge_net_uninit_dflt_config(bd);
+
+ bnge_devlink_unregister(bd);
+
+ bnge_fw_unregister_dev(bd);
+
+ bnge_cleanup_hwrm_resources(bd);
+
+ bnge_unmap_bars(pdev);
+
+ bnge_devlink_free(bd);
+
+ bnge_pci_disable(pdev);
+}
+
+static void bnge_shutdown(struct pci_dev *pdev)
+{
+ pci_disable_device(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, 0);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+static struct pci_driver bnge_driver = {
+ .name = bnge_driver_name,
+ .id_table = bnge_pci_tbl,
+ .probe = bnge_probe_one,
+ .remove = bnge_remove_one,
+ .shutdown = bnge_shutdown,
+};
+
+static int __init bnge_init_module(void)
+{
+ return pci_register_driver(&bnge_driver);
+}
+module_init(bnge_init_module);
+
+static void __exit bnge_exit_module(void)
+{
+ pci_unregister_driver(&bnge_driver);
+}
+module_exit(bnge_exit_module);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+
+struct bnge_db_info {
+ void __iomem *doorbell;
+ u64 db_key64;
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
+#endif /* _BNGE_DB_H_ */
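
A worked example of the doorbell index macros above, with the ring sizing assumed: for a 128-entry ring one would expect db_ring_mask = 127, db_epoch_mask = 128 and db_epoch_shift = 17, so that bit 7 of the raw index (which flips on every wrap) lands in doorbell bit 24, i.e. DBR_EPOCH_SFT:

    /* Values assumed for a 128-entry ring, using the definitions above. */
    struct bnge_db_info db = {
        .db_ring_mask  = 127,    /* ring size 128, power of two */
        .db_epoch_mask = 128,    /* the index bit that flips on each wrap */
        .db_epoch_shift = 17,    /* moves bit 7 up to bit 24 (DBR_EPOCH_SFT) */
    };

    /* For idx = 130 (second lap, slot 2):
     *   DB_EPOCH(&db, 130)    = (130 & 128) << 17 = 1 << 24
     *   DB_RING_IDX(&db, 130) = (130 & 127) | (1 << 24) = 0x1000002
     * The low bits address slot 2, and the epoch bit tells the NIC this
     * is an odd lap, so a stale doorbell from the previous lap cannot be
     * mistaken for the current one.
     */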
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
new file mode 100644
index 000000000000..a987afebd64d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "bnge.h"
+#include "bnge_devlink.h"
+#include "bnge_hwrm_lib.h"
+
+static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req,
+ enum bnge_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))
+ return 0;
+
+ switch (type) {
+ case BNGE_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNGE_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNGE_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+
+ return 0;
+}
+
+static void bnge_vpd_read_info(struct bnge_dev *bd)
+{
+ struct pci_dev *pdev = bd->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto read_sn;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_partno, &vpd_data[pos], size);
+
+read_sn:
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
+ if (pos < 0)
+ goto exit;
+
+ size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1);
+ memcpy(bd->board_serialno, &vpd_data[pos], size);
+
+exit:
+ kfree(vpd_data);
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
+static int bnge_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
+ struct bnge_dev *bd = devlink_priv(devlink);
+ struct hwrm_ver_get_output *ver_resp;
+ char mgmt_ver[FW_VER_STR_LEN];
+ char roce_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
+ char buf[32];
+ int rc;
+
+ if (bd->dsn) {
+ u8 dsn[8];
+
+ put_unaligned_le64(bd->dsn, dsn);
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn");
+ return rc;
+ }
+ }
+
+ if (strlen(bd->board_serialno)) {
+ rc = devlink_info_board_serial_number_put(req,
+ bd->board_serialno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set board serial number");
+ return rc;
+ }
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bd->board_partno);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number");
+ return rc;
+ }
+
+ /* More information from HWRM ver get command */
+ sprintf(buf, "%X", bd->chip_num);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id");
+ return rc;
+ }
+
+ ver_resp = &bd->ver_resp;
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bd->nvm_cfg_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version");
+ return rc;
+ }
+
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware generic version");
+ return rc;
+ }
+
+ if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
+ ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
+ ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
+ ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
+ } else {
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
+ ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
+ ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
+ ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
+ }
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bd->hwrm_ver_supp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set firmware mgmt api version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set ncsi firmware version");
+ return rc;
+ }
+
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version");
+ return rc;
+ }
+
+ rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info);
+ if (rc ||
+     !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
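+ /* The remaining versions describe the flashed (stored) package,
+  * which may differ from the running firmware until the next reset.
+  */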
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware package version");
+ return rc;
+ }
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored firmware version");
+ return rc;
+ }
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored ncsi firmware version");
+ return rc;
+ }
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set stored roce firmware version");
+
+ return rc;
+}
+
+static const struct devlink_ops bnge_devlink_ops = {
+ .info_get = bnge_devlink_info_get,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_free(devlink);
+}
+
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev)
+{
+ struct devlink *devlink;
+ struct bnge_dev *bd;
+
+ devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ bd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, bd);
+ bd->dev = &pdev->dev;
+ bd->pdev = pdev;
+
+ bd->dsn = pci_get_dsn(pdev);
+ if (!bd->dsn)
+ pci_warn(pdev, "Failed to get DSN\n");
+
+ bnge_vpd_read_info(bd);
+
+ return bd;
+}
+
+void bnge_devlink_register(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_register(devlink);
+}
+
+void bnge_devlink_unregister(struct bnge_dev *bd)
+{
+ struct devlink *devlink = priv_to_devlink(bd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
new file mode 100644
index 000000000000..c6575255e650
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DEVLINK_H_
+#define _BNGE_DEVLINK_H_
+
+enum bnge_dl_version_type {
+ BNGE_VERSION_FIXED,
+ BNGE_VERSION_RUNNING,
+ BNGE_VERSION_STORED,
+};
+
+void bnge_devlink_free(struct bnge_dev *bd);
+struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev);
+void bnge_devlink_register(struct bnge_dev *bd);
+void bnge_devlink_unregister(struct bnge_dev *bd);
+
+#endif /* _BNGE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
new file mode 100644
index 000000000000..569371c1b4f2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "bnge.h"
+#include "bnge_ethtool.h"
+
+static void bnge_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ struct bnge_dev *bd = bn->bd;
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bnge_ethtool_ops = {
+ .get_drvinfo = bnge_get_drvinfo,
+};
+
+void bnge_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &bnge_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
new file mode 100644
index 000000000000..21e96a0976d5
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_ETHTOOL_H_
+#define _BNGE_ETHTOOL_H_
+
+void bnge_set_ethtool_ops(struct net_device *dev);
+
+#endif /* _BNGE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
new file mode 100644
index 000000000000..c3087e5cd875
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+
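+/* Each request carries a sentinel derived from its context address and
+ * request type; __hwrm_ctx_get() recomputes it to reject stale or
+ * corrupted request pointers passed back through the void * API.
+ */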
+static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type)
+{
+ return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL;
+}
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len)
+{
+ struct bnge_hwrm_ctx *ctx;
+ dma_addr_t dma_handle;
+ u8 *req_addr;
+
+ if (req_len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
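+ /* A single DMA buffer backs each request: the request itself at
+  * offset 0, context metadata at BNGE_HWRM_CTX_OFFSET and the
+  * response in the last page at BNGE_HWRM_RESP_OFFSET.
+  */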
+ req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
+ &dma_handle);
+ if (!req_addr)
+ return -ENOMEM;
+
+ ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET);
+ /* safety first, sentinel used to check for invalid requests */
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+ ctx->req_len = req_len;
+ ctx->req = (struct input *)req_addr;
+ ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET);
+ ctx->dma_handle = dma_handle;
+ ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
+ ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ ctx->gfp = GFP_KERNEL;
+ ctx->slice_addr = NULL;
+
+ /* initialize common request fields */
+ ctx->req->req_type = cpu_to_le16(req_type);
+ ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET);
+ ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING);
+ ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET);
+ *req = ctx->req;
+
+ return 0;
+}
+
+static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr)
+{
+ void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET;
+ struct input *req = (struct input *)req_addr;
+ struct bnge_hwrm_ctx *ctx = ctx_addr;
+ u64 sentinel;
+
+ if (!req) {
+ dev_err(bd->dev, "null HWRM request");
+ dump_stack();
+ return NULL;
+ }
+
+ /* HWRM API has no type safety, verify sentinel to validate address */
+ sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type));
+ if (ctx->sentinel != sentinel) {
+ dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n",
+ (u32)le16_to_cpu(req->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void bnge_hwrm_req_timeout(struct bnge_dev *bd,
+ void *req, unsigned int timeout)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->timeout = timeout;
+}
+
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->gfp = gfp;
+}
+
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *internal_req = req;
+ u16 req_type;
+
+ if (!ctx)
+ return -EINVAL;
+
+ if (len > BNGE_HWRM_CTX_OFFSET)
+ return -E2BIG;
+
+ /* free any existing slices */
+ ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET;
+ if (ctx->slice_addr) {
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+ ctx->slice_addr = NULL;
+ }
+ ctx->gfp = GFP_KERNEL;
+
+ if ((bd->fw_cap & BNGE_FW_CAP_SHORT_CMD) || len > BNGE_HWRM_MAX_REQ_LEN) {
+ memcpy(internal_req, new_req, len);
+ } else {
+ internal_req->req_type = ((struct input *)new_req)->req_type;
+ ctx->req = new_req;
+ }
+
+ ctx->req_len = len;
+ ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
+ BNGE_HWRM_RESP_OFFSET);
+
+ /* update sentinel for potentially new request type */
+ req_type = le16_to_cpu(internal_req->req_type);
+ ctx->sentinel = bnge_cal_sentinel(ctx, req_type);
+
+ return 0;
+}
+
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ ctx->flags |= (flags & BNGE_HWRM_API_FLAGS);
+}
+
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ struct input *input = (struct input *)req;
+
+ if (!ctx)
+ return NULL;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) {
+ dev_err(bd->dev, "HWRM context already owned, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED;
+ return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET;
+}
+
+static void __hwrm_ctx_invalidate(struct bnge_dev *bd,
+ struct bnge_hwrm_ctx *ctx)
+{
+ void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET;
+ dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */
+
+ /* unmap any auxiliary DMA slice */
+ if (ctx->slice_addr)
+ dma_free_coherent(bd->dev, ctx->slice_size,
+ ctx->slice_addr, ctx->slice_handle);
+
+ /* invalidate, ensure ownership, sentinel and dma_handle are cleared */
+ memset(ctx, 0, sizeof(struct bnge_hwrm_ctx));
+
+ /* return the buffer to the DMA pool */
+ if (dma_handle)
+ dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle);
+}
+
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (ctx)
+ __hwrm_ctx_invalidate(bd, ctx);
+}
+
+static int bnge_map_hwrm_error(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
+static struct bnge_hwrm_wait_token *
+bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return NULL;
+
+ mutex_lock(&bd->hwrm_cmd_lock);
+
+ token->dst = dst;
+ token->state = BNGE_HWRM_PENDING;
+ if (dst == BNGE_HWRM_CHNL_CHIMP) {
+ token->seq_id = bd->hwrm_cmd_seq++;
+ hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list);
+ } else {
+ token->seq_id = bd->hwrm_cmd_kong_seq++;
+ }
+
+ return token;
+}
+
+static void
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token)
+{
+ if (token->dst == BNGE_HWRM_CHNL_CHIMP) {
+ hlist_del_rcu(&token->node);
+ kfree_rcu(token, rcu);
+ } else {
+ kfree(token);
+ }
+ mutex_unlock(&bd->hwrm_cmd_lock);
+}
+
+static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNGE_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define bnge_hwrm_err(bd, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \
+ dev_dbg((bd)->dev, fmt, __VA_ARGS__); \
+ else \
+ dev_err((bd)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx)
+{
+ u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP;
+ u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM;
+ struct bnge_hwrm_wait_token *token = NULL;
+ u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN;
+ unsigned int i, timeout, tmo_count;
+ u32 *data = (u32 *)ctx->req;
+ u32 msg_len = ctx->req_len;
+ int rc = -EBUSY;
+ u32 req_type;
+ u16 len = 0;
+ u8 *valid;
+
+ if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY)
+ memset(ctx->resp, 0, PAGE_SIZE);
+
+ req_type = le16_to_cpu(ctx->req->req_type);
+
+ if (msg_len > BNGE_HWRM_MAX_REQ_LEN &&
+ msg_len > bd->hwrm_max_ext_req_len) {
+ dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x",
+ req_type);
+ rc = -E2BIG;
+ goto exit;
+ }
+
+ token = bnge_hwrm_create_token(bd, dst);
+ if (!token) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ ctx->req->seq_id = cpu_to_le16(token->seq_id);
+
+ /* Ensure any associated DMA buffers are written before doorbell */
+ wmb();
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4);
+
+ for (i = msg_len; i < max_req_len; i += 4)
+ writel(0, bd->bar0 + bar_offset + i);
+
+ /* Ring channel doorbell */
+ writel(1, bd->bar0 + doorbell_offset);
+
+ bnge_hwrm_req_dbg(bd, ctx->req);
+
+ /* Cap the timeout at the supported maximum */
+ timeout = min(ctx->timeout,
+ bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT);
+ /* convert timeout to usec */
+ timeout *= 1000;
+
+ i = 0;
+ /* Short timeout for the first few iterations:
+ * number of loops = number of loops for short timeout +
+ * number of loops for standard timeout.
+ */
+ tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT *
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER;
+ tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT);
+
+ if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE &&
+ i++ < tmo_count) {
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) {
+ bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n",
+ req_type);
+ goto exit;
+ }
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ valid = ((u8 *)ctx->resp) + len - 1;
+ } else {
+ __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
+ int j;
+
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ if (token &&
+ READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) {
+ bnge_hwrm_destroy_token(bd, token);
+ token = NULL;
+ }
+
+ len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
+ if (len) {
+ __le16 resp_seq = READ_ONCE(ctx->resp->seq_id);
+
+ if (resp_seq == ctx->req->seq_id)
+ break;
+ if (resp_seq != seen_out_of_seq) {
+ dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
+ le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id));
+ seen_out_of_seq = resp_seq;
+ }
+ }
+
+ /* on first few passes, just barely sleep */
+ if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) {
+ usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT,
+ BNGE_HWRM_SHORT_MAX_TIMEOUT);
+ } else {
+ usleep_range(BNGE_HWRM_MIN_TIMEOUT,
+ BNGE_HWRM_MAX_TIMEOUT);
+ }
+ }
+
+ if (i >= tmo_count) {
+ bnge_hwrm_err(bd, ctx,
+ "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ bnge_hwrm_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
+ goto exit;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = ((u8 *)ctx->resp) + len - 1;
+ for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+ break;
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
+ }
+
+ if (j >= BNGE_HWRM_FIN_WAIT_USEC) {
+ bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ bnge_hwrm_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
+ goto exit;
+ }
+ }
+
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
+ rc = le16_to_cpu(ctx->resp->error_code);
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT))
+ dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
+ rc = bnge_map_hwrm_error(rc);
+
+exit:
+ if (token)
+ bnge_hwrm_destroy_token(bd, token);
+ if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED)
+ ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY;
+ else
+ __hwrm_ctx_invalidate(bd, ctx);
+ return rc;
+}
+
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+
+ if (!ctx)
+ return -EINVAL;
+
+ return __hwrm_send_ctx(bd, ctx);
+}
+
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req)
+{
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void *
+bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma_handle)
+{
+ struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req);
+ u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE;
+ struct input *input = req;
+ u8 *addr, *req_addr = req;
+ u32 max_offset, offset;
+
+ if (!ctx)
+ return NULL;
+
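+ /* Prefer carving the slice from the unused tail of the request
+  * buffer; fall back to a one-off coherent allocation otherwise.
+  */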
+ max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated;
+ offset = max_offset - size;
+ offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN);
+ addr = req_addr + offset;
+
+ if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
+ ctx->allocated = end - addr;
+ *dma_handle = ctx->dma_handle + offset;
+ return addr;
+ }
+
+ if (ctx->slice_addr) {
+ dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
+ (u32)le16_to_cpu(input->req_type));
+ dump_stack();
+ return NULL;
+ }
+
+ addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp);
+ if (!addr)
+ return NULL;
+
+ ctx->slice_addr = addr;
+ ctx->slice_size = size;
+ ctx->slice_handle = *dma_handle;
+
+ return addr;
+}
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd)
+{
+ struct bnge_hwrm_wait_token *token;
+
+ dma_pool_destroy(bd->hwrm_dma_pool);
+ bd->hwrm_dma_pool = NULL;
+
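+ /* Mark any still-pending requests cancelled so their pollers bail
+  * out instead of waiting for completions that will never arrive.
+  */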
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node)
+ WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED);
+ rcu_read_unlock();
+}
+
+int bnge_init_hwrm_resources(struct bnge_dev *bd)
+{
+ bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev,
+ BNGE_HWRM_DMA_SIZE,
+ BNGE_HWRM_DMA_ALIGN, 0);
+ if (!bd->hwrm_dma_pool)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&bd->hwrm_pending_list);
+ mutex_init(&bd->hwrm_cmd_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
new file mode 100644
index 000000000000..6df629761d95
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_H_
+#define _BNGE_HWRM_H_
+
+#include <linux/bnxt/hsi.h>
+
+enum bnge_hwrm_ctx_flags {
+ BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0),
+ BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1),
+ BNGE_HWRM_CTX_SILENT = BIT(2),
+ BNGE_HWRM_FULL_WAIT = BIT(3),
+};
+
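+/* Flags callers may set via bnge_hwrm_req_flags(); the INTERNAL_* bits
+ * are managed by the HWRM core itself.
+ */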
+#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT)
+
+struct bnge_hwrm_ctx {
+ u64 sentinel;
+ dma_addr_t dma_handle;
+ struct output *resp;
+ struct input *req;
+ dma_addr_t slice_handle;
+ void *slice_addr;
+ u32 slice_size;
+ u32 req_len;
+ enum bnge_hwrm_ctx_flags flags;
+ unsigned int timeout;
+ u32 allocated;
+ gfp_t gfp;
+};
+
+enum bnge_hwrm_wait_state {
+ BNGE_HWRM_PENDING,
+ BNGE_HWRM_DEFERRED,
+ BNGE_HWRM_COMPLETE,
+ BNGE_HWRM_CANCELLED,
+};
+
+enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG };
+
+struct bnge_hwrm_wait_token {
+ struct rcu_head rcu;
+ struct hlist_node node;
+ enum bnge_hwrm_wait_state state;
+ enum bnge_hwrm_chnl dst;
+ u16 seq_id;
+};
+
+#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500
+
+#define BNGE_GRCPF_REG_CHIMP_COMM 0x0
+#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+
+#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len)
+#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
+#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U
+#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20
+#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout)
+#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4)
+#define BNGE_HWRM_TARGET 0xffff
+#define BNGE_HWRM_NO_CMPL_RING -1
+#define BNGE_HWRM_REQ_MAX_SIZE 128
+#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */
+#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE
+#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \
+ BNGE_HWRM_RESP_RESERVED)
+#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \
+ sizeof(struct bnge_hwrm_ctx))
+#define BNGE_HWRM_DMA_ALIGN 16
+#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */
+#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3
+#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10
+#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5
+
+#define BNGE_HWRM_MIN_TIMEOUT 25
+#define BNGE_HWRM_MAX_TIMEOUT 40
+
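+/* Convert a poll iteration count into approximate elapsed usec: the first
+ * BNGE_HWRM_SHORT_TIMEOUT_COUNTER iterations sleep for
+ * BNGE_HWRM_SHORT_MIN_TIMEOUT usec each, the remainder for
+ * BNGE_HWRM_MIN_TIMEOUT usec.
+ */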
+static inline unsigned int bnge_hwrm_timeout(unsigned int n)
+{
+ return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ?
+ n * BNGE_HWRM_SHORT_MIN_TIMEOUT :
+ BNGE_HWRM_SHORT_TIMEOUT_COUNTER *
+ BNGE_HWRM_SHORT_MIN_TIMEOUT +
+ (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) *
+ BNGE_HWRM_MIN_TIMEOUT;
+}
+
+#define BNGE_HWRM_FIN_WAIT_USEC 50000
+
+void bnge_cleanup_hwrm_resources(struct bnge_dev *bd);
+int bnge_init_hwrm_resources(struct bnge_dev *bd);
+
+int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type,
+ u32 req_len);
+#define bnge_hwrm_req_init(bd, req, req_type) \
+ bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \
+ sizeof(*(req)))
+void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req,
+ enum bnge_hwrm_ctx_flags flags);
+void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req,
+ unsigned int timeout);
+int bnge_hwrm_req_send(struct bnge_dev *bd, void *req);
+int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req);
+void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags);
+void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size,
+ dma_addr_t *dma);
+int bnge_hwrm_req_replace(struct bnge_dev *bd, void *req, void *new_req,
+ u32 len);
+#endif /* _BNGE_HWRM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
new file mode 100644
index 000000000000..198f49b40dbf
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -0,0 +1,1185 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+#include "bnge_resc.h"
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd)
+{
+ u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
+ struct hwrm_ver_get_output *resp;
+ struct hwrm_ver_get_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET);
+ if (rc)
+ return rc;
+
+ bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT);
+ bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
+ req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req->hwrm_intf_min = HWRM_VERSION_MINOR;
+ req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
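+ /* Advertise the lower of the firmware's HWRM interface version and
+  * the version this driver was built against.
+  */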
+ hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
+ resp->hwrm_intf_min_8b << 8 |
+ resp->hwrm_intf_upd_8b;
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (hwrm_spec_code > hwrm_ver)
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+
+ bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ fw_maj, fw_min, fw_bld, fw_rsv);
+
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bd->fw_ver_str);
+
+ snprintf(bd->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bd->fw_cap |= BNGE_FW_CAP_PKG_VER;
+ }
+
+ bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bd->hwrm_cmd_timeout)
+ bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT;
+ bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bd->hwrm_cmd_max_timeout)
+ bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT;
+ else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT)
+ dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n",
+ bd->hwrm_cmd_max_timeout / 1000);
+
+ bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+
+ if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
+
+ bd->chip_num = le16_to_cpu(resp->chip_num);
+ bd->chip_rev = resp->chip_rev;
+
+ dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
+ if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW;
+
+hwrm_ver_get_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int
+bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp;
+ struct hwrm_nvm_get_dev_info_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ memcpy(nvm_info, resp, sizeof(*resp));
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_reset(struct bnge_dev *bd)
+{
+ struct hwrm_func_reset_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET);
+ if (rc)
+ return rc;
+
+ req->enables = 0;
+ bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd)
+{
+ struct hwrm_fw_set_time_input *req;
+ struct tm tm;
+ int rc;
+
+ time64_to_tm(ktime_get_real_seconds(), 0, &tm);
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME);
+ if (rc)
+ return rc;
+
+ req->year = cpu_to_le16(1900 + tm.tm_year);
+ req->month = 1 + tm.tm_mon;
+ req->day = tm.tm_mday;
+ req->hour = tm.tm_hour;
+ req->minute = tm.tm_min;
+ req->second = tm.tm_sec;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_rgtr_output *resp;
+ struct hwrm_func_drv_rgtr_input *req;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
+
+ req->flags = cpu_to_le32(flags);
+ req->ver_maj_8b = DRV_VER_MAJ;
+ req->ver_min_8b = DRV_VER_MIN;
+ req->ver_upd_8b = DRV_VER_UPD;
+ req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req->ver_min = cpu_to_le16(DRV_VER_MIN);
+ req->ver_upd = cpu_to_le16(DRV_VER_UPD);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state);
+ if (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd)
+{
+ struct hwrm_func_drv_unrgtr_input *req;
+ int rc;
+
+ if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state))
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR);
+ if (rc)
+ return rc;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define BNGE_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnge_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ if (bd->ctx)
+ return 0;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bd->ctx = ctx;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+
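+ /* Walk the firmware's backing-store type list; each response names
+  * the next valid type, so the loop index advances from the response
+  * rather than by increment.
+  */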
+ for (type = 0; type < BNGE_CTX_V2_MAX; ) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags &
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnge_init_ctx_initializer(ctxm, init_val, init_off,
+ BNGE_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNGE_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX);
+
+ctx_done:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ if (!rmem->nr_pages)
+ return;
+
+ BNGE_SET_CTX_PAGE_ATTR(*pg_attr);
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->dma_pg_tbl);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
+
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ bnge_hwrm_req_hold(bd, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnge_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags = cpu_to_le32(BNGE_BS_CFG_ALL_DONE);
+ rc = bnge_hwrm_req_send(bd, req);
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static int bnge_hwrm_get_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ u16 cp, stats;
+ u16 rx, tx;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc) {
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+ }
+
+ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
+ hw_resc->resv_hw_ring_grps =
+ le32_to_cpu(resp->alloc_hw_ring_grps);
+ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
+ cp = le16_to_cpu(resp->alloc_cmpl_rings);
+ stats = le16_to_cpu(resp->alloc_stat_ctx);
+ hw_resc->resv_irqs = cp;
+ rx = hw_resc->resv_rx_rings;
+ tx = hw_resc->resv_tx_rings;
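+ /* With aggregation rings, each RX queue consumes two HW rings, so
+  * halve the reserved count before comparing against completions.
+  */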
+ if (bnge_is_agg_reqd(bd))
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ rc = bnge_fix_rings_count(&rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
+
+get_rings_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+static struct hwrm_func_cfg_input *
+__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ u32 enables = 0;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
+ return NULL;
+
+ req->fid = cpu_to_le16(0xffff);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cmpl);
+ req->num_msix = cpu_to_le16(hwr->nq);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
+ req->enables = cpu_to_le32(enables);
+
+ return req;
+}
+
+static int
+bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ req = __bnge_hwrm_reserve_pf_rings(bd, hwr);
+ if (!req)
+ return -ENOMEM;
+
+ if (!req->enables) {
+ bnge_hwrm_req_drop(bd, req);
+ return 0;
+ }
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ return rc;
+
+ return bnge_hwrm_get_rings(bd);
+}
+
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
+{
+ return bnge_hwrm_reserve_pf_rings(bd, hwr);
+}
+
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto func_qcfg_exit;
+
+ bd->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bd->max_mtu)
+ bd->max_mtu = BNGE_MAX_MTU;
+
+ if (bd->db_size)
+ goto func_qcfg_exit;
+
+ bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024);
+ if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) ||
+ bd->db_size <= bd->db_offset)
+ bd->db_size = pci_resource_len(bd->pdev, 2);
+
+func_qcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_resource_qcaps_output *resp;
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ struct hwrm_func_resource_qcaps_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send_silent(bd, req);
+ if (rc)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
+ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ hw_resc->max_nqs = le16_to_cpu(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+
+hwrm_func_resc_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_qcaps_input *req;
+ struct bnge_pf_info *pf = &bd->pf;
+ u32 flags;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V1;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bd->flags |= BNGE_EN_ROCE_V2;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+
+ bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
+
+hwrm_func_qcaps_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd)
+{
+ struct hwrm_vnic_qcaps_output *resp;
+ struct hwrm_vnic_qcaps_input *req;
+ int rc;
+
+ bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc) {
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM;
+ bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bd->max_tpa_v2)
+ bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP;
+ }
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+#define BNGE_CNPQ(q_profile) \
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd)
+{
+ struct hwrm_queue_qportcfg_output *resp;
+ struct hwrm_queue_qportcfg_input *req;
+ u8 i, j, *qptr;
+ bool no_rdma;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bd->max_tc = resp->max_configurable_queues;
+ bd->max_lltc = resp->max_configurable_lossless_queues;
+ if (bd->max_tc > BNGE_MAX_QUEUE)
+ bd->max_tc = BNGE_MAX_QUEUE;
+
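+ /* Lossy RoCE CNP queues are reserved for congestion notification;
+  * skip them for L2 traffic classes unless RDMA is disabled.
+  */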
+ no_rdma = !bnge_is_roce_en(bd);
+ qptr = &resp->queue_id0;
+ for (i = 0, j = 0; i < bd->max_tc; i++) {
+ bd->q_info[j].queue_id = *qptr;
+ bd->q_ids[i] = *qptr++;
+ bd->q_info[j].queue_profile = *qptr++;
+ bd->tc_to_qidx[j] = j;
+ if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma)
+ j++;
+ }
+ bd->max_q = bd->max_tc;
+ bd->max_tc = max_t(u8, j, 1);
+
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bd->max_tc = 1;
+
+ if (bd->max_lltc > bd->max_tc)
+ bd->max_lltc = bd->max_tc;
+
+qportcfg_exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+ req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+ if (bnge_is_agg_reqd(bd)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct hwrm_vnic_rss_cfg_input *req,
+ struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_fill_hw_rss_tbl(bn, vnic);
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+ req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss)
+{
+ struct hwrm_vnic_rss_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return bnge_hwrm_req_send(bd, req);
+
+ __bnge_hwrm_vnic_set_rss(bn, req, vnic);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+ struct hwrm_vnic_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
+
+ req->default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req->default_cmpl_ring_id =
+ cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+ req->enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ if (bd->flags & BNGE_EN_STRIP_VLAN)
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+ req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ /* all contexts configured to same hash_type, zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = bnge_hwrm_req_hold(bd, req);
+ if (!bnge_hwrm_req_send(bd, req))
+ bd->rss_hash_cfg =
+ le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = fltr->base.filter_id;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ fltr->base.filter_id = resp->l2_filter_id;
+
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return bnge_hwrm_req_send_silent(bd, req);
+}
+
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings)
+{
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ unsigned int i;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+ return;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ }
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
+
+ req->rss_cos_lb_ctx_id =
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+ return;
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+ bnge_hwrm_req_send(bd, req);
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc, i;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ break;
+
+ nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ struct hwrm_ring_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ bnge_hwrm_req_drop(bd, req);
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index)
+{
+ struct bnge_ring_mem_info *rmem = &ring->ring_mem;
+ struct bnge_ring_grp_info *grp_info;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ u16 ring_id, flags = 0;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
+
+ req->enables = 0;
+ if (rmem->nr_pages > 1) {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
+ /* Page size is in log2 units */
+ req->page_size = BNGE_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
+ } else {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ }
+ req->fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req->logical_id = cpu_to_le16(map_index);
+
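+ /* Ring-type specific setup: TX and CMPL rings bind to their parent
+  * completion/NQ rings, RX and AGG rings to stats contexts and
+  * buffer sizes.
+  */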
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX: {
+ struct bnge_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnge_tx_ring_info,
+ tx_ring_struct);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
+ req->length = cpu_to_le32(bn->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ req->flags = cpu_to_le16(flags);
+ break;
+ }
+ case HWRM_RING_ALLOC_RX:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bn->rx_ring_mask + 1);
+
+ /* Association of rx ring with stats context */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req->flags = cpu_to_le16(flags);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ /* Association of cp ring with nq */
+ grp_info = &bn->grp_info[map_index];
+ req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
+ return -EINVAL;
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ ring_id = le16_to_cpu(resp->ring_id);
+ bnge_hwrm_req_drop(bd, req);
+
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return bnge_hwrm_req_send(bd, req);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
new file mode 100644
index 000000000000..042f28e84a05
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_HWRM_LIB_H_
+#define _BNGE_HWRM_LIB_H_
+
+#define BNGE_PLC_EN_JUMBO_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
+#define BNGE_PLC_EN_HDS_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
+#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
+ VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
+
+int bnge_hwrm_ver_get(struct bnge_dev *bd);
+int bnge_hwrm_func_reset(struct bnge_dev *bd);
+int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd);
+int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd);
+int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnge_hwrm_func_backing_store(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm,
+ bool last);
+int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_reserve_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr);
+int bnge_hwrm_func_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
+int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
+int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss);
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings);
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic);
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
+int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id);
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index);
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
+#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
new file mode 100644
index 000000000000..832eeb960bd2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -0,0 +1,2485 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <net/ip.h>
+#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
+#define BNGE_RING_TO_TC_OFF(bd, tx) \
+ ((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx) \
+ ((tx) / (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
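+/* Example: with tx_nr_rings_per_tc = 4, TX ring 6 belongs to TC 1
+ * (BNGE_RING_TO_TC), is the third ring of that TC (BNGE_RING_TO_TC_OFF
+ * gives offset 2), and TC 1's rings start at ring 4 (BNGE_TC_TO_RING_BASE).
+ */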
+static void bnge_free_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (stats->hw_stats) {
+ dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
+
+static int bnge_alloc_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ bnge_free_stats_mem(bn, &nqr->stats);
+ }
+}
+
+static int bnge_alloc_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u32 size, i;
+ int rc;
+
+ size = bd->hw_ring_stats_size;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ nqr->stats.len = size;
+ rc = bnge_alloc_stats_mem(bn, &nqr->stats);
+ if (rc)
+ goto err_free_ring_stats;
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+
+err_free_ring_stats:
+ bnge_free_ring_stats(bn);
+ return rc;
+}
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+ return -ENOMEM;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
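+/* Build the per-NQ completion ring arrays. An NQ that services an RX ring
+ * gets one RX completion ring; an NQ that services TX gets one completion
+ * ring per traffic class (a single TC at this point, so tcs = 1).
+ */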
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, ulp_msix, rc;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
+ ring->map_idx = ulp_msix + i;
+
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr) {
+ rc = -ENOMEM;
+ goto err_free_nq_tree;
+ }
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_buf_ring)
+ return;
+
+ max_idx = bn->rx_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ void *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ rx_buf->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, true);
+ }
+}
+
+static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_agg_buf_ring)
+ return;
+
+ max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
+ netmem_ref netmem = rx_agg_buf->netmem;
+
+ if (!netmem)
+ continue;
+
+ rx_agg_buf->netmem = 0;
+ __clear_bit(i, rxr->rx_agg_bmap);
+
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+}
+
+static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->rx_ring)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
+}
+
+static void bnge_free_all_rings_bufs(struct bnge_net *bn)
+{
+ bnge_free_rx_ring_pair_bufs(bn);
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
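+/* Create the payload page pool for an RX ring. When the payload pool hands
+ * out unreadable netmem, or PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE, a separate,
+ * smaller head pool is created for packet headers; otherwise the head pool
+ * is just an extra reference on the payload pool.
+ */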
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int numa_node)
+{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+ struct page_pool_params pp = { 0 };
+ struct bnge_dev *bd = bn->bd;
+ struct page_pool *pool;
+
+ pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bn->netdev;
+ pp.dev = bd->dev;
+ pp.dma_dir = bn->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnge_separate_head_pool(rxr)) {
+ pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+ int i, rc = 0, agg_rings = 0, cpu;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_is_agg_reqd(bd))
+ agg_rings = 1;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+ int cpu_node;
+
+ ring = &rxr->rx_ring_struct;
+
+ cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+ if (rc)
+ goto err_free_rx_rings;
+ bnge_enable_rx_page_pool(rxr);
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ if (agg_rings) {
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+ if (rc)
+ goto err_free_rx_rings;
+ }
+ }
+ return rc;
+
+err_free_rx_rings:
+ bnge_free_rx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc;
+
+ for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_tx_rings;
+
+ ring->grp_idx = txr->bnapi->index;
+ qidx = bd->tc_to_qidx[j];
+ ring->queue_id = bd->q_info[qidx].queue_id;
+ if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+
+err_free_tx_rings:
+ bnge_free_tx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_vnic_attributes(struct bnge_net *bn)
+{
+ struct pci_dev *pdev = bn->bd->pdev;
+ struct bnge_vnic_info *vnic;
+ int i;
+
+ if (!bn->vnic_info)
+ return;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_vnic_info *vnic;
+ int i, size;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
+ int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(bd->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ /* Allocate rss table and hash key */
+ size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(bd->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ goto err_free_vnic_attributes;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+err_free_vnic_attributes:
+ bnge_free_vnic_attributes(bn);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_vnics(struct bnge_net *bn)
+{
+ int num_vnics;
+
+ /* Allocate only 1 VNIC for now
+ * Additional VNICs will be added based on RFS/NTUPLE in future patches
+ */
+ num_vnics = 1;
+
+ bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
+ GFP_KERNEL);
+ if (!bn->vnic_info)
+ return -ENOMEM;
+
+ bn->nr_vnics = num_vnics;
+
+ return 0;
+}
+
+static void bnge_free_vnics(struct bnge_net *bn)
+{
+ kfree(bn->vnic_info);
+ bn->vnic_info = NULL;
+ bn->nr_vnics = 0;
+}
+
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
+static int bnge_init_ring_grps(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bn->grp_info = kcalloc(bd->nq_nr_rings,
+ sizeof(struct bnge_ring_grp_info),
+ GFP_KERNEL);
+ if (!bn->grp_info)
+ return -ENOMEM;
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+ bnge_free_vnic_attributes(bn);
+ bnge_free_tx_rings(bn);
+ bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_stats(bn);
+ bnge_free_ring_grps(bn);
+ bnge_free_vnics(bn);
+ kfree(bn->tx_ring_map);
+ bn->tx_ring_map = NULL;
+ kfree(bn->tx_ring);
+ bn->tx_ring = NULL;
+ kfree(bn->rx_ring);
+ bn->rx_ring = NULL;
+ kfree(bn->bnapi);
+ bn->bnapi = NULL;
+}
+
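+/* A single allocation backs both the bnapi pointer array and the bnge_napi
+ * structs themselves; the structs begin right after the cacheline-aligned
+ * pointer array.
+ */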
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, size, arr_size;
+ int rc = -ENOMEM;
+ void *bnapi;
+
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+ bd->nq_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+ bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return rc;
+
+ bn->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+ struct bnge_nq_ring_info *nqr;
+
+ bn->bnapi[i] = bnapi;
+ bn->bnapi[i]->index = i;
+ bn->bnapi[i]->bn = bn;
+ nqr = &bn->bnapi[i]->nq_ring;
+ nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ }
+
+ bn->rx_ring = kcalloc(bd->rx_nr_rings,
+ sizeof(struct bnge_rx_ring_info),
+ GFP_KERNEL);
+ if (!bn->rx_ring)
+ goto err_free_core;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->bnapi = bn->bnapi[i];
+ bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+ }
+
+ bn->tx_ring = kcalloc(bd->tx_nr_rings,
+ sizeof(struct bnge_tx_ring_info),
+ GFP_KERNEL);
+ if (!bn->tx_ring)
+ goto err_free_core;
+
+ bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+ if (!bn->tx_ring_map)
+ goto err_free_core;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ j = 0;
+ else
+ j = bd->rx_nr_rings;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_napi *bnapi2;
+ int k;
+
+ txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ bn->tx_ring_map[i] = i;
+ k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+ bnapi2 = bn->bnapi[k];
+ txr->txq_index = i;
+ txr->tx_napi_idx =
+ BNGE_RING_TO_TC(bd, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ txr->bnapi = bnapi2;
+ }
+
+ rc = bnge_alloc_ring_stats(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_vnics(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
+ bnge_init_ring_struct(bn);
+
+ rc = bnge_alloc_rx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
+ bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
+ BNGE_VNIC_MCAST_FLAG |
+ BNGE_VNIC_UCAST_FLAG;
+ rc = bnge_alloc_vnic_attributes(bn);
+ if (rc)
+ goto err_free_core;
+ return 0;
+
+err_free_core:
+ bnge_free_core(bn);
+ return rc;
+}
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->rx_cpr->ring_struct.fw_ring_id;
+}
+
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
+{
+ return txr->tx_cpr->ring_struct.fw_ring_id;
+}
+
+static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_init_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
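+/* When PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE, hand out BNGE_RX_PAGE_SIZE
+ * fragments of a page; otherwise map whole pages at offset 0.
+ */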
+static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
+ dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNGE_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bn->rx_buf_size, gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
+ return page_address(page) + offset;
+}
+
+static int bnge_alloc_rx_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ u8 *data;
+
+ rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bn->rx_offset;
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_ring_size; i++) {
+ rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX(prod);
+ }
+
+	/* Abort only if not even a single buffer could be allocated */
+ if (rc && !i) {
+ netdev_err(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, abort\n",
+ ring_nr, i, bn->rx_ring_size);
+ return rc;
+ }
+
+ rxr->rx_prod = prod;
+
+ if (i < bn->rx_ring_size)
+ netdev_warn(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_ring_size);
+ return 0;
+}
+
+static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_agg_bd *rx_agg_buf;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ netmem_ref netmem;
+
+ rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
+ netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+
+ rx_agg_buf->netmem = netmem;
+ rx_agg_buf->offset = offset;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_agg_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_agg_ring_size; i++) {
+ rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX_AGG(prod);
+ }
+
+ if (rc && i < MAX_SKB_FRAGS) {
+ netdev_err(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
+ ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
+ goto err_free_one_agg_ring_bufs;
+ }
+
+ rxr->rx_agg_prod = prod;
+
+ if (i < bn->rx_agg_ring_size)
+ netdev_warn(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_agg_ring_size);
+ return 0;
+
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
+ int rc;
+
+ rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bn->bd)) {
+ rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ goto err_free_one_rx_ring_bufs;
+ }
+ return 0;
+
+err_free_one_rx_ring_bufs:
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ return rc;
+}
+
+static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
+{
+ struct rx_bd **rx_desc_ring;
+ u32 prod;
+ int i;
+
+ rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
+ struct rx_bd *rxbd = rx_desc_ring[i];
+ int j;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_init_rxbd_pages(ring, type);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ if (bnge_is_agg_reqd(bn->bd)) {
+ type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnge_init_rxbd_pages(ring, type);
+ }
+}
+
+static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr;
+
+ rxr = &bn->rx_ring[ring_nr];
+ bnge_init_one_rx_ring_rxbd(bn, rxr);
+
+ netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ bnge_init_one_agg_ring_rxbd(bn, rxr);
+}
+
+static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ int i, rc;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++) {
+ rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+ }
+ return 0;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static void bnge_init_rx_rings(struct bnge_net *bn)
+{
+ int i;
+
+#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
+ bn->rx_offset = BNGE_RX_OFFSET;
+ bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++)
+ bnge_init_one_rx_ring_pair(bn, i);
+}
+
+static void bnge_init_tx_rings(struct bnge_net *bn)
+{
+ int i;
+
+ bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
+ }
+}
+
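+/* The default VNIC owns the RSS hash key: generate it once unless a key
+ * was already provided, cache its first 8 bytes as the Toeplitz prefix,
+ * and copy the same key into every other VNIC.
+ */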
+static void bnge_init_vnics(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+ int j;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
+ if (bn->vnic_info[i].rss_hash_key) {
+ if (i == BNGE_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bn->rss_hash_key_valid &&
+ !bn->rss_hash_key_updated) {
+ get_random_bytes(bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bn->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bn->rss_hash_key_updated)
+ continue;
+
+ bn->rss_hash_key_updated = false;
+ bn->rss_hash_key_valid = true;
+
+ bn->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bn->toeplitz_prefix <<= 8;
+ bn->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+ }
+}
+
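+/* Compose the 64-bit doorbell key: DBR path and type bits plus the
+ * firmware ring ID in the XID field. The ring and epoch masks derived
+ * from the ring size let DB_RING_IDX() wrap the ring index.
+ */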
+static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bn->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bn->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bn->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bn->cp_ring_mask;
+ break;
+ }
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+}
+
+static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type, u32 map_idx, u32 xid)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
+
+ db->doorbell = bd->bar1 + bd->db_offset;
+ bnge_set_db_mask(bn, db, ring_type);
+}
+
+static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+
+ return 0;
+}
+
+static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ struct bnge_dev *bd = bn->bd;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bd->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnge_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
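+/* Firmware ring allocation order matters: NQs first (NQ0 also becomes the
+ * async event completion ring), then a completion ring and TX ring per TX
+ * queue, then a completion ring and RX ring per RX queue, with agg rings
+ * last. The RX doorbell is rung here only when agg rings are not in use;
+ * otherwise it is rung after the agg doorbell in
+ * bnge_hwrm_rx_agg_ring_alloc().
+ */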
+static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ bool agg_rings;
+ int i, rc = 0;
+
+ agg_rings = !!(bnge_is_agg_reqd(bd));
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+ u32 type = HWRM_RING_ALLOC_NQ;
+ u32 map_idx = ring->map_idx;
+ unsigned int vector;
+
+ vector = bd->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc) {
+ enable_irq(vector);
+ goto err_out;
+ }
+ bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ enable_irq(vector);
+ bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
+ }
+ }
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+
+ rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
+ if (rc)
+ goto err_out;
+ rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
+ if (rc)
+ goto err_out;
+ }
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ struct bnge_napi *bnapi;
+ u32 map_idx, type;
+
+ rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
+ if (rc)
+ goto err_out;
+		/* With agg rings, the rx doorbell is deferred until agg
+		 * buffers are posted in bnge_hwrm_rx_agg_ring_alloc().
+		 */
+ if (!agg_rings)
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+
+ cpr = rxr->rx_cpr;
+ bnapi = rxr->bnapi;
+ type = HWRM_RING_ALLOC_CMPL;
+ map_idx = bnapi->index;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ goto err_out;
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+ }
+
+ if (agg_rings) {
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
+ if (rc)
+ goto err_out;
+ }
+ }
+err_out:
+ return rc;
+}
+
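+/* Each RSS indirection table entry expands to a pair of IDs: the selected
+ * RX ring's firmware ID followed by its companion completion ring's ID.
+ */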
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_dev *bd = bn->bd;
+ u16 tbl_size, i;
+
+ tbl_size = bnge_get_rxfh_indir_size(bd);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bd->rss_indir_tbl[i];
+ rxr = &bn->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnge_cp_ring_for_rx(rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnge_hwrm_vnic_cfg(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
+ vnic->vnic_id, i, rc);
+ return -ENOMEM;
+ }
+ bn->rsscos_nr_ctxs++;
+ }
+
+ rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bd)) {
+ rc = bnge_hwrm_vnic_set_hds(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
+ vnic->vnic_id, rc);
+ }
+ return rc;
+}
+
+static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
+{
+ if (!refcount_dec_and_test(&fltr->refcnt))
+ return;
+ hlist_del_rcu(&fltr->base.hash);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static void bnge_init_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_filter *fltr,
+ struct bnge_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNGE_FLTR_TYPE_L2;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ refcount_set(&fltr->refcnt, 1);
+}
+
+static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+ struct hlist_head *head;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnge_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+
+ rcu_read_lock();
+ fltr = __bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ refcount_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
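+/* Hash the {MAC, VLAN} key with jhash2 to pick a bucket. If a matching
+ * filter already exists, return it with an elevated refcount; otherwise
+ * insert a fresh entry with refcount 1.
+ */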
+static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnge_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
+ BNGE_L2_FLTR_HASH_MASK;
+ fltr = bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+
+ bnge_init_l2_filter(bn, fltr, key, idx);
+ return fltr;
+}
+
+static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
+ const u8 *mac_addr)
+{
+ struct bnge_l2_filter *fltr;
+ struct bnge_l2_key key;
+ int rc;
+
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
+ if (rc)
+ goto err_del_l2_filter;
+ bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
+ return rc;
+
+err_del_l2_filter:
+ bnge_del_l2_filter(bn, fltr);
+ return rc;
+}
+
+static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int mc_count = 0, off = 0;
+ bool update = false;
+ u8 *haddr;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNGE_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnge_uc_list_updated(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
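+/* No restrictions on promiscuous mode for now. */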
+static bool bnge_promisc_ok(struct bnge_net *bn)
+{
+ return true;
+}
+
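+/* Re-sync the default VNIC with the netdev address lists: rebuild the
+ * unicast filters (slot 0 stays the device MAC), fall back to promiscuous
+ * mode if the UC list overflows, and fall back to ALL_MCAST if programming
+ * the MC filters fails.
+ */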
+static int bnge_cfg_def_vnic(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnge_uc_list_updated(bn);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
+ vnic->uc_filter_count = i;
+ return rc;
+ }
+ }
+
+skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnge_promisc_ok(bn))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
+ netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ }
+ if (rc)
+ netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+}
+
+static void bnge_hwrm_vnic_free(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++)
+ bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
+}
+
+static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
+{
+ int i, j;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
+ }
+ }
+ bn->rsscos_nr_ctxs = 0;
+}
+
+static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bn->bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 0;
+}
+
+static void bnge_clear_vnic(struct bnge_net *bn)
+{
+ bnge_hwrm_clear_vnic_filter(bn);
+ bnge_hwrm_vnic_free(bn);
+ bnge_hwrm_vnic_ctx_free(bn);
+}
+
+static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring;
+
+ ring = &cpr->ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
+ bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
+ }
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
+ bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
+
+ ring = &nqr->ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_setup_msix(bn);
+
+ return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+}
+
+static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
+{
+ bnge_clear_vnic(bn);
+ bnge_hwrm_ring_free(bn, close_path);
+ bnge_hwrm_stat_ctx_free(bn);
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ goto err_free_irq;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ goto err_free_irq;
+ }
+ }
+ }
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+ return rc;
+}
+
+static int bnge_init_chip(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+#define BNGE_DEF_STATS_COAL_TICKS 1000000
+ bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
+
+ rc = bnge_hwrm_stat_ctx_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_ring_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_setup_vnic(bn, vnic);
+ if (rc)
+ goto err_out;
+
+ if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnge_hwrm_update_rss_hash_cfg(bn);
+
+ /* Filter for default vnic 0 */
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
+ if (rc) {
+ netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
+ goto err_out;
+ }
+ vnic->uc_filter_count = 1;
+
+ vnic->rx_mask = 0;
+
+ if (bn->netdev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (bn->netdev->flags & IFF_PROMISC)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ if (bn->netdev->flags & IFF_ALLMULTI) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else if (bn->netdev->flags & IFF_MULTICAST) {
+ u32 mask = 0;
+
+ bnge_mc_list_updated(bn, &mask);
+ vnic->rx_mask |= mask;
+ }
+
+ rc = bnge_cfg_def_vnic(bn);
+ if (rc)
+ goto err_out;
+ return 0;
+
+err_out:
+ bnge_hwrm_resource_free(bn, 0);
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+	/* Defer NAPI implementation to a later patch series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static int bnge_init_nic(struct bnge_net *bn)
+{
+ int rc;
+
+ bnge_init_nq_tree(bn);
+
+ bnge_init_rx_rings(bn);
+ rc = bnge_alloc_rx_ring_pair_bufs(bn);
+ if (rc)
+ return rc;
+
+ bnge_init_tx_rings(bn);
+
+ rc = bnge_init_ring_grps(bn);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+
+ bnge_init_vnics(bn);
+
+ rc = bnge_init_chip(bn);
+ if (rc)
+ goto err_free_ring_grps;
+ return rc;
+
+err_free_ring_grps:
+ bnge_free_ring_grps(bn);
+ return rc;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
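+/* Bring-up sequence: reserve rings with firmware, allocate host memory,
+ * register NAPI, request IRQs, then program the NIC (stats contexts,
+ * rings, VNIC, RSS and filters).
+ */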
+static int bnge_open_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ netif_carrier_off(bn->netdev);
+
+ rc = bnge_reserve_rings(bd);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_core(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+ return rc;
+ }
+
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
+ rc = bnge_init_nic(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
+ goto err_free_irq;
+ }
+ set_bit(BNGE_STATE_OPEN, &bd->state);
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
+}
+
+static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
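+	/* TX datapath is not implemented yet; drop the packet. */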
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int bnge_open(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+ int rc;
+
+ rc = bnge_open_core(bn);
+ if (rc)
+ netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+ return rc;
+}
+
+static int bnge_shutdown_nic(struct bnge_net *bn)
+{
+ /* TODO: close_path = 0 until we make NAPI functional */
+ bnge_hwrm_resource_free(bn, 0);
+ return 0;
+}
+
+static void bnge_close_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_shutdown_nic(bn);
+ bnge_free_all_rings_bufs(bn);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
+ bnge_free_core(bn);
+}
+
+static int bnge_close(struct net_device *dev)
+{
+ struct bnge_net *bn = netdev_priv(dev);
+
+ bnge_close_core(bn);
+
+ return 0;
+}
+
+static const struct net_device_ops bnge_netdev_ops = {
+ .ndo_open = bnge_open,
+ .ndo_stop = bnge_close,
+ .ndo_start_xmit = bnge_start_xmit,
+};
+
+static void bnge_init_mac_addr(struct bnge_dev *bd)
+{
+ eth_hw_addr_set(bd->netdev, bd->pf.mac_addr);
+}
+
+static void bnge_set_tpa_flags(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+
+ bn->priv_flags &= ~BNGE_NET_EN_TPA;
+
+ if (bd->netdev->features & NETIF_F_LRO)
+ bn->priv_flags |= BNGE_NET_EN_LRO;
+ else if (bd->netdev->features & NETIF_F_GRO_HW)
+ bn->priv_flags |= BNGE_NET_EN_GRO;
+}
+
+static void bnge_init_l2_fltr_tbl(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed));
+}
+
+void bnge_set_ring_params(struct bnge_dev *bd)
+{
+ struct bnge_net *bn = netdev_priv(bd->netdev);
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bn->rx_copy_thresh = BNGE_RX_COPY_THRESH;
+ ring_size = bn->rx_ring_size;
+ bn->rx_agg_ring_size = 0;
+ bn->rx_agg_nr_pages = 0;
+
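+	/* With TPA, each RX packet can aggregate up to 64KB; scale the agg
+	 * ring accordingly, capped at a factor of 4.
+	 */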
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE);
+
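+	/* Buffers larger than one page require the aggregation ring even
+	 * without TPA.
+	 */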
+ bn->priv_flags &= ~BNGE_NET_EN_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bn->priv_flags |= BNGE_NET_EN_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ if (agg_factor) {
+ if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) {
+ ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA;
+ netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n",
+ bn->rx_ring_size, ring_size);
+ bn->rx_ring_size = ring_size;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size,
+ RX_DESC_CNT);
+ if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bn->rx_agg_ring_size = agg_ring_size;
+ bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+
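+		/* With the agg ring in use, primary RX buffers only need to
+		 * hold data up to the copy threshold.
+		 */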
+ rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bn->rx_buf_use_size = rx_size;
+ bn->rx_buf_size = rx_space;
+
+ bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT);
+ bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bn->tx_ring_size;
+ bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT);
+ bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1;
+
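+	/* Worst case: up to two completion entries per RX packet (including
+	 * TPA), plus room for agg and TX completions.
+	 */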
+ max_rx_cmpl = bn->rx_ring_size;
+
+ if (bn->priv_flags & BNGE_NET_EN_TPA)
+ max_rx_cmpl += bd->max_tpa_v2;
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size;
+ bn->cp_ring_size = ring_size;
+
+ bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT);
+ if (bn->cp_nr_pages > MAX_CP_PAGES) {
+ bn->cp_nr_pages = MAX_CP_PAGES;
+ bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n",
+ ring_size, bn->cp_ring_size);
+ }
+ bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT;
+ bn->cp_ring_mask = bn->cp_bit - 1;
+}
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
+{
+ struct net_device *netdev;
+ struct bnge_net *bn;
+ int rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE,
+ max_irqs);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, bd->dev);
+ bd->netdev = netdev;
+
+ netdev->netdev_ops = &bnge_netdev_ops;
+
+ bnge_set_ethtool_ops(netdev);
+
+ bn = netdev_priv(netdev);
+ bn->netdev = netdev;
+ bn->bd = bd;
+
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->max_mtu = bd->max_mtu;
+
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_PARTIAL;
+
+ if (bd->flags & BNGE_EN_UDP_GSO_SUPP)
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
+
+ netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX;
+ if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT)
+ netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX;
+
+ if (BNGE_SUPPORTS_TPA(bd))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+
+ if (netdev->features & NETIF_F_GRO_HW)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+ if (bd->tso_max_segs)
+ netif_set_tso_max_segs(netdev, bd->tso_max_segs);
+
+ bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
+ bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+ bn->rx_dir = DMA_FROM_DEVICE;
+
+ bnge_set_tpa_flags(bd);
+ bnge_set_ring_params(bd);
+
+ bnge_init_l2_fltr_tbl(bn);
+ bnge_init_mac_addr(bd);
+
+ netdev->request_ops_lock = true;
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
+ goto err_netdev;
+ }
+
+ return 0;
+
+err_netdev:
+ free_netdev(netdev);
+ return rc;
+}
+
+void bnge_netdev_free(struct bnge_dev *bd)
+{
+ struct net_device *netdev = bd->netdev;
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ bd->netdev = NULL;
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
new file mode 100644
index 000000000000..fb3b961536ba
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_NETDEV_H_
+#define _BNGE_NETDEV_H_
+
+#include <linux/bnxt/hsi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/refcount.h>
+#include "bnge_db.h"
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
+};
+
+struct bnge_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
+ u8 is_ts_pkt;
+ u8 is_push;
+ u8 action;
+ unsigned short nr_frags;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
+};
+
+struct bnge_sw_rx_bd {
+ void *data;
+ u8 *data_ptr;
+ dma_addr_t mapping;
+};
+
+struct bnge_sw_rx_agg_bd {
+ netmem_ref netmem;
+ unsigned int offset;
+ dma_addr_t mapping;
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
+
+struct bnge_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 nq_fw_ring_id;
+};
+
+#define BNGE_RX_COPY_THRESH 256
+
+#define BNGE_HW_FEATURE_VLAN_ALL_RX \
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNGE_HW_FEATURE_VLAN_ALL_TX \
+ (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
+enum {
+ BNGE_NET_EN_GRO = BIT(0),
+ BNGE_NET_EN_LRO = BIT(1),
+ BNGE_NET_EN_JUMBO = BIT(2),
+};
+
+#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1 buffers. We need
+ * one extra BD because the first TX BD is always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
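+/* A ring index encodes the descriptor page (upper bits) and the entry within
+ * that page (lower bits); descriptors are 16 bytes, hence the PAGE_SHIFT - 4.
+ */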
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define BNGE_NQ_HDL_TYPE_SHIFT 24
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
+struct bnge_net {
+ struct bnge_dev *bd;
+ struct net_device *netdev;
+
+ u32 priv_flags;
+
+ u32 rx_ring_size;
+ u32 rx_buf_size;
+ u32 rx_buf_use_size; /* usable size */
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ u16 rx_nr_pages;
+ u16 rx_agg_nr_pages;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ u16 tx_nr_pages;
+
+ /* NQs and Completion rings */
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ u16 cp_nr_pages;
+
+#define BNGE_L2_FLTR_HASH_SIZE 32
+#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
+ struct bnge_napi **bnapi;
+
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring;
+
+ u16 *tx_ring_map;
+ enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ struct bnge_vnic_info *vnic_info;
+ int nr_vnics;
+ int total_irqs;
+
+ u32 tx_wake_thresh;
+ u16 rx_offset;
+ u16 rx_dma_offset;
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
+ int rsscos_nr_ctxs;
+ u32 stats_coal_ticks;
+};
+
+#define BNGE_DEFAULT_RX_RING_SIZE 511
+#define BNGE_DEFAULT_TX_RING_SIZE 511
+
+int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs);
+void bnge_netdev_free(struct bnge_dev *bd);
+void bnge_set_ring_params(struct bnge_dev *bd);
+
+#if (BNGE_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES_AGG_ENA 1
+#define MAX_RX_PAGES 4
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 16
+#else
+#define MAX_RX_PAGES_AGG_ENA 8
+#define MAX_RX_PAGES 32
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 128
+#endif
+
+#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT)
+
+#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp))
+#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
+#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define BNGE_MAX_TXR_PER_NAPI 8
+
+#define bnge_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
+#define BNGE_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+struct bnge_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
+struct bnge_cp_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct tx_cmp **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
+ u32 cp_raw_cons;
+ struct bnge_db_info cp_db;
+};
+
+struct bnge_nq_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct nqe_cn **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u32 nq_raw_cons;
+ struct bnge_db_info nq_db;
+
+ struct bnge_stats_mem stats;
+ u32 hw_stats_ctx_id;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
+};
+
+struct bnge_rx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *rx_cpr;
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ u16 rx_next_cons;
+ struct bnge_db_info rx_db;
+ struct bnge_db_info rx_agg_db;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnge_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnge_ring_struct rx_ring_struct;
+ struct bnge_ring_struct rx_agg_ring_struct;
+ struct page_pool *page_pool;
+ struct page_pool *head_pool;
+ bool need_head_pool;
+};
+
+struct bnge_tx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *tx_cpr;
+ u16 tx_prod;
+ u16 tx_cons;
+ u16 tx_hw_cons;
+ u16 txq_index;
+ u8 tx_napi_idx;
+ u8 kick_pending;
+ struct bnge_db_info tx_db;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnge_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ u32 dev_state;
+#define BNGE_DEV_STATE_CLOSING 0x1
+
+ struct bnge_ring_struct tx_ring_struct;
+};
+
+struct bnge_napi {
+ struct napi_struct napi;
+ struct bnge_net *bn;
+ int index;
+
+ struct bnge_nq_ring_info nq_ring;
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+};
+
+#define INVALID_STATS_CTX_ID -1
+#define BNGE_VNIC_DEFAULT 0
+#define BNGE_MAX_UC_ADDRS 4
+
+struct bnge_vnic_info {
+ u16 fw_vnic_id;
+#define BNGE_MAX_CTX_PER_VNIC 8
+ u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
+ u16 mru;
+ /* index 0 always dev_addr */
+ struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
+ u16 uc_filter_count;
+ u8 *uc_list;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ int rss_table_size;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNGE_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNGE_VNIC_RSS_FLAG 1
+#define BNGE_VNIC_MCAST_FLAG 4
+#define BNGE_VNIC_UCAST_FLAG 8
+ u32 vnic_id;
+};
+
+struct bnge_filter_base {
+ struct hlist_node hash;
+ struct list_head list;
+ __le64 filter_id;
+ u8 type;
+#define BNGE_FLTR_TYPE_L2 2
+ u8 flags;
+ u16 rxq;
+ u16 fw_vnic_id;
+ u16 vf_idx;
+ unsigned long state;
+#define BNGE_FLTR_VALID 0
+#define BNGE_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnge_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
+struct bnge_l2_filter {
+ /* base filter must be the first member */
+ struct bnge_filter_base base;
+ struct bnge_l2_key l2_key;
+ refcount_t refcnt;
+};
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
new file mode 100644
index 000000000000..943df5f60f01
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "bnge.h"
+#include "bnge_hwrm.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_resc.h"
+
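+/* TX rings are replicated per traffic class; the completion ring demand is
+ * the per-TC chunk of the total TX ring count.
+ */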
+static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx)
+{
+ u16 tcs = bd->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+
+ return tx / tcs;
+}
+
+static u16 bnge_get_max_func_irqs(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs);
+}
+
+static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_stat_ctxs;
+}
+
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd)
+{
+ unsigned int max_stat_ctxs;
+
+ max_stat_ctxs = bnge_get_max_func_stat_ctxs(bd);
+ if (max_stat_ctxs <= BNGE_MIN_ROCE_STAT_CTXS ||
+ bd->nq_nr_rings == max_stat_ctxs)
+ return false;
+
+ return true;
+}
+
+static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd)
+{
+ return bd->hw_resc.max_cp_rings;
+}
+
+static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
+{
+ int roce_msix = BNGE_MAX_ROCE_MSIX;
+
+ return min_t(int, roce_msix, num_online_cpus() + 1);
+}
+
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_msix;
+
+ return 0;
+}
+
+static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_msix = num;
+}
+
+static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd)
+{
+ if (bnge_is_roce_en(bd))
+ return bd->aux_num_stat_ctxs;
+
+ return 0;
+}
+
+static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx)
+{
+ if (bnge_is_roce_en(bd))
+ bd->aux_num_stat_ctxs = num_aux_ctx;
+}
+
+static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd);
+}
+
+static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd)
+{
+ int stat_ctx = 0;
+
+ if (bnge_is_roce_en(bd)) {
+ stat_ctx = BNGE_MIN_ROCE_STAT_CTXS;
+
+ if (!bd->pf.port_id && bd->port_count > 1)
+ stat_ctx++;
+ }
+
+ return stat_ctx;
+}
+
+static u16 bnge_nqs_demand(struct bnge_dev *bd)
+{
+ return bd->nq_nr_rings + bnge_aux_get_msix(bd);
+}
+
+static u16 bnge_cprs_demand(struct bnge_dev *bd)
+{
+ return bd->tx_nr_rings + bd->rx_nr_rings;
+}
+
+static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num)
+{
+ u16 max_irq = bnge_get_max_func_irqs(bd);
+ u16 total_demand = bd->nq_nr_rings + num;
+
+ if (max_irq < total_demand) {
+ num = max_irq - bd->nq_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+
+ return num;
+}
+
+static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 tx_chunks)
+{
+ return tx_chunks * bd->num_tc;
+}
+
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared)
+{
+ u16 _rx = *rx, _tx = *tx;
+
+ if (shared) {
+ *rx = min_t(u16, _rx, max);
+ *tx = min_t(u16, _tx, max);
+ } else {
+ if (max < 2)
+ return -ENOMEM;
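+		/* Trim whichever of RX/TX is larger until the pair fits. */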
+ while (_rx + _tx > max) {
+ if (_rx > _tx && _rx > 1)
+ _rx--;
+ else if (_tx > 1)
+ _tx--;
+ }
+ *rx = _rx;
+ *tx = _tx;
+ }
+
+ return 0;
+}
+
+static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
+ u16 *tx, u16 max_nq, bool sh)
+{
+ u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx);
+
+ if (tx_chunks != *tx) {
+		u16 tx_saved = tx_chunks;
+		int rc;
+
+ rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh);
+ if (rc)
+ return rc;
+ if (tx_chunks != tx_saved)
+ *tx = bnge_num_cp_to_tx(bd, tx_chunks);
+ return 0;
+ }
+
+ return bnge_fix_rings_count(rx, tx, max_nq, sh);
+}
+
+int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+{
+ if (!rx_rings)
+ return 0;
+
+ return bnge_adjust_pow_two(rx_rings - 1,
+ BNGE_RSS_TABLE_ENTRIES);
+}
+
+static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ return bnge_cal_nr_rss_ctxs(hwr->grp);
+}
+
+static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
+{
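+	/* Only the default VNIC is supported for now. */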
+ return 1;
+}
+
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+{
+ return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
+ BNGE_RSS_TABLE_ENTRIES;
+}
+
+static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 max_entries, pad;
+ u32 *rss_indir_tbl;
+ int i;
+
+ max_entries = bnge_get_rxfh_indir_size(bd);
+ rss_indir_tbl = &bd->rss_indir_tbl[0];
+
+ for (i = 0; i < max_entries; i++)
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i,
+ bd->rx_nr_rings);
+
+ pad = bd->rss_indir_tbl_entries - max_entries;
+ if (pad)
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
+}
+
+static void bnge_copy_reserved_rings(struct bnge_dev *bd,
+ struct bnge_hw_rings *hwr)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->nq = hw_resc->resv_irqs;
+ hwr->cmpl = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+}
+
+static bool bnge_rings_ok(struct bnge_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic &&
+ hwr->stat && hwr->cmpl;
+}
+
+static bool bnge_need_reserve_rings(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 cprs = bnge_cprs_demand(bd);
+ u16 rx = bd->rx_nr_rings, stat;
+ u16 nqs = bnge_nqs_demand(bd);
+ u16 vnic;
+
+ if (hw_resc->resv_tx_rings != bd->tx_nr_rings)
+ return true;
+
+ vnic = bnge_get_total_vnics(bd, rx);
+
+ if (bnge_is_agg_reqd(bd))
+ rx <<= 1;
+ stat = bnge_func_stat_ctxs_demand(bd);
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat)
+ return true;
+ if (hw_resc->resv_irqs != nqs)
+ return true;
+
+ return false;
+}
+
+int bnge_reserve_rings(struct bnge_dev *bd)
+{
+ u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd);
+ struct bnge_hw_rings hwr = {0};
+ u16 rx_rings, old_rx_rings;
+ u16 nq = bd->nq_nr_rings;
+ u16 aux_msix = 0;
+ bool sh = false;
+ u16 tx_cp;
+ int rc;
+
+ if (!bnge_need_reserve_rings(bd))
+ return 0;
+
+ if (!bnge_aux_registered(bd)) {
+ aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix);
+ if (!aux_msix)
+ bnge_aux_set_stat_ctxs(bd, 0);
+
+ if (aux_msix > aux_dflt_msix)
+ aux_msix = aux_dflt_msix;
+ hwr.nq = nq + aux_msix;
+ } else {
+ hwr.nq = bnge_nqs_demand(bd);
+ }
+
+ hwr.tx = bd->tx_nr_rings;
+ hwr.rx = bd->rx_nr_rings;
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ sh = true;
+ hwr.cmpl = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnge_get_total_vnics(bd, hwr.rx);
+
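+	/* When aggregation is required, each RX ring needs a companion agg
+	 * ring, doubling the RX ring demand.
+	 */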
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx <<= 1;
+ hwr.grp = bd->rx_nr_rings;
+ hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr);
+ hwr.stat = bnge_func_stat_ctxs_demand(bd);
+ old_rx_rings = bd->hw_resc.resv_rx_rings;
+
+ rc = bnge_hwrm_reserve_rings(bd, &hwr);
+ if (rc)
+ return rc;
+
+ bnge_copy_reserved_rings(bd, &hwr);
+
+ rx_rings = hwr.rx;
+ if (bnge_is_agg_reqd(bd)) {
+ if (hwr.rx >= 2)
+ rx_rings = hwr.rx >> 1;
+ else
+ return -ENOMEM;
+ }
+
+ rx_rings = min_t(u16, rx_rings, hwr.grp);
+ hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings);
+ if (hwr.stat > bnge_aux_get_stat_ctxs(bd))
+ hwr.stat -= bnge_aux_get_stat_ctxs(bd);
+ hwr.nq = min_t(u16, hwr.nq, hwr.stat);
+
+ /* Adjust the rings */
+ rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh);
+ if (bnge_is_agg_reqd(bd))
+ hwr.rx = rx_rings << 1;
+ tx_cp = hwr.tx;
+ hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bd->tx_nr_rings = hwr.tx;
+
+ if (rx_rings != bd->rx_nr_rings)
+		dev_warn(bd->dev, "RX rings reservation reduced to %d from the %d requested\n",
+			 rx_rings, bd->rx_nr_rings);
+
+ bd->rx_nr_rings = rx_rings;
+ bd->nq_nr_rings = hwr.nq;
+
+ if (!bnge_rings_ok(&hwr))
+ return -ENOMEM;
+
+ if (old_rx_rings != bd->hw_resc.resv_rx_rings)
+ bnge_set_dflt_rss_indir_tbl(bd);
+
+ if (!bnge_aux_registered(bd)) {
+ u16 resv_msix, resv_ctx, aux_ctxs;
+ struct bnge_hw_resc *hw_resc;
+
+ hw_resc = &bd->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings;
+ aux_msix = min_t(u16, resv_msix, aux_msix);
+ bnge_aux_set_msix_num(bd, aux_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings;
+ aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd));
+ bnge_aux_set_stat_ctxs(bd, aux_ctxs);
+ }
+
+ return rc;
+}
+
+int bnge_alloc_irqs(struct bnge_dev *bd)
+{
+ u16 aux_msix, tx_cp, num_entries;
+ int i, irqs_demand, rc;
+ u16 max, min = 1;
+
+ irqs_demand = bnge_nqs_demand(bd);
+ max = bnge_get_max_func_irqs(bd);
+ if (irqs_demand > max)
+ irqs_demand = max;
+
+ if (!(bd->flags & BNGE_EN_SHARED_CHNL))
+ min = 2;
+
+ irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand,
+ PCI_IRQ_MSIX);
+ aux_msix = bnge_aux_get_msix(bd);
+ if (irqs_demand < 0 || irqs_demand < aux_msix) {
+ rc = -ENODEV;
+ goto err_free_irqs;
+ }
+
+ num_entries = irqs_demand;
+ if (pci_msix_can_alloc_dyn(bd->pdev))
+ num_entries = max;
+ bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL);
+ if (!bd->irq_tbl) {
+ rc = -ENOMEM;
+ goto err_free_irqs;
+ }
+
+ for (i = 0; i < irqs_demand; i++)
+ bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i);
+
+ bd->irqs_acquired = irqs_demand;
+	/* Reduce rings based on the number of vectors allocated.
+	 * We don't need to consider NQs as they have already been
+	 * calculated and must be more than irqs_demand.
+	 */
+ rc = bnge_adjust_rings(bd, &bd->rx_nr_rings,
+ &bd->tx_nr_rings,
+ irqs_demand - aux_msix, min == 1);
+ if (rc)
+ goto err_free_irqs;
+
+ tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings);
+ bd->nq_nr_rings = (min == 1) ?
+ max_t(u16, tx_cp, bd->rx_nr_rings) :
+ tx_cp + bd->rx_nr_rings;
+
+ /* Readjust tx_nr_rings_per_tc */
+ if (!bd->num_tc)
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+
+ return 0;
+
+err_free_irqs:
+ dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc);
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+void bnge_free_irqs(struct bnge_dev *bd)
+{
+ pci_free_irq_vectors(bd->pdev);
+ kfree(bd->irq_tbl);
+ bd->irq_tbl = NULL;
+}
+
+static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, u16 *max_nq)
+{
+ struct bnge_hw_resc *hw_resc = &bd->hw_resc;
+ u16 max_ring_grps = 0, max_cp;
+ int rc;
+
+ *max_tx = hw_resc->max_tx_rings;
+ *max_rx = hw_resc->max_rx_rings;
+ *max_nq = min_t(int, bnge_get_max_func_irqs(bd),
+ hw_resc->max_stat_ctxs);
+ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (bnge_is_agg_reqd(bd))
+ *max_rx >>= 1;
+
+ max_cp = bnge_get_max_func_cp_rings(bd);
+
+ /* Fix RX and TX rings according to number of CPs available */
+ rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
+
+ *max_rx = min_t(int, *max_rx, max_ring_grps);
+}
+
+static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx,
+ u16 *max_tx, bool shared)
+{
+ u16 rx, tx, nq;
+
+ _bnge_get_max_rings(bd, &rx, &tx, &nq);
+ *max_rx = rx;
+ *max_tx = tx;
+ if (!rx || !tx || !nq)
+ return -ENOMEM;
+
+ return bnge_fix_rings_count(max_rx, max_tx, nq, shared);
+}
+
+static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx,
+ bool shared)
+{
+ int rc;
+
+ rc = bnge_get_max_rings(bd, max_rx, max_tx, shared);
+ if (rc) {
+ dev_info(bd->dev, "Not enough rings available\n");
+ return rc;
+ }
+
+ if (bnge_is_roce_en(bd)) {
+ int max_cp, max_stat, max_irq;
+
+ /* Reserve minimum resources for RoCE */
+ max_cp = bnge_get_max_func_cp_rings(bd);
+ max_stat = bnge_get_max_func_stat_ctxs(bd);
+ max_irq = bnge_get_max_func_irqs(bd);
+ if (max_cp <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_irq <= BNGE_MIN_ROCE_CP_RINGS ||
+ max_stat <= BNGE_MIN_ROCE_STAT_CTXS)
+ return 0;
+
+ max_cp -= BNGE_MIN_ROCE_CP_RINGS;
+ max_irq -= BNGE_MIN_ROCE_CP_RINGS;
+ max_stat -= BNGE_MIN_ROCE_STAT_CTXS;
+ max_cp = min_t(u16, max_cp, max_irq);
+ max_cp = min_t(u16, max_cp, max_stat);
+ rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared);
+ if (rc)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* In the initial default shared ring setting, each shared ring must have an
+ * RX/TX ring pair.
+ */
+static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd)
+{
+ bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings);
+ bd->rx_nr_rings = bd->nq_nr_rings;
+ bd->tx_nr_rings_per_tc = bd->nq_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+}
+
+static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh)
+{
+ u16 dflt_rings, max_rx_rings, max_tx_rings;
+ int rc;
+
+ if (sh)
+ bd->flags |= BNGE_EN_SHARED_CHNL;
+
+ dflt_rings = netif_get_num_default_rss_queues();
+
+ rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
+ bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings);
+ bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings);
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+ else
+ bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings;
+ bd->tx_nr_rings = bd->tx_nr_rings_per_tc;
+
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+ dev_warn(bd->dev, "Unable to reserve tx rings\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ if (sh)
+ bnge_trim_dflt_sh_rings(bd);
+
+ /* Rings may have been reduced, re-reserve them again */
+ if (bnge_need_reserve_rings(bd)) {
+ rc = bnge_reserve_rings(bd);
+ if (rc && rc != -ENODEV)
+			dev_warn(bd->dev, "Second rings reservation failed\n");
+ bd->tx_nr_rings_per_tc = bd->tx_nr_rings;
+ }
+ if (rc) {
+ bd->tx_nr_rings = 0;
+ bd->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
+static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd)
+{
+ u16 entries;
+
+ entries = BNGE_MAX_RSS_TABLE_ENTRIES;
+
+ bd->rss_indir_tbl_entries = entries;
+ bd->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL);
+ if (!bd->rss_indir_tbl)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int bnge_net_init_dflt_config(struct bnge_dev *bd)
+{
+ struct bnge_hw_resc *hw_resc;
+ int rc;
+
+ rc = bnge_alloc_rss_indir_tbl(bd);
+ if (rc)
+ return rc;
+
+ rc = bnge_net_init_dflt_rings(bd, true);
+ if (rc)
+ goto err_free_tbl;
+
+ hw_resc = &bd->hw_resc;
+ bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNGE_L2_FLTR_MAX_FLTR;
+
+ return 0;
+
+err_free_tbl:
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+ return rc;
+}
+
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd)
+{
+ kfree(bd->rss_indir_tbl);
+ bd->rss_indir_tbl = NULL;
+}
+
+void bnge_aux_init_dflt_config(struct bnge_dev *bd)
+{
+ bd->aux_num_msix = bnge_aux_get_dflt_msix(bd);
+ bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
new file mode 100644
index 000000000000..b62a634669f6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RESC_H_
+#define _BNGE_RESC_H_
+
+#include "bnge_netdev.h"
+#include "bnge_rmem.h"
+
+struct bnge_hw_resc {
+ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
+ u16 min_cp_rings;
+ u16 max_cp_rings;
+ u16 resv_cp_rings;
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
+ u16 resv_hw_ring_grps;
+ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+ u16 min_vnics;
+ u16 max_vnics;
+ u16 resv_vnics;
+ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
+ u16 max_irqs;
+ u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+};
+
+struct bnge_hw_rings {
+ u16 tx;
+ u16 rx;
+ u16 grp;
+ u16 nq;
+ u16 cmpl;
+ u16 stat;
+ u16 vnic;
+ u16 rss_ctx;
+};
+
+/* "TXRX", 2 hypens, plus maximum integer */
+#define BNGE_IRQ_NAME_EXTRA 17
+struct bnge_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested:1;
+ u8 have_cpumask:1;
+ char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA];
+ cpumask_var_t cpu_mask;
+};
+
+int bnge_reserve_rings(struct bnge_dev *bd);
+int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared);
+int bnge_alloc_irqs(struct bnge_dev *bd);
+void bnge_free_irqs(struct bnge_dev *bd);
+int bnge_net_init_dflt_config(struct bnge_dev *bd);
+void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
+void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
+int bnge_cal_nr_rss_ctxs(u16 rx_rings);
+bool bnge_aux_has_enough_resources(struct bnge_dev *bd);
+
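+/* Number of ent_per_blk-sized blocks backing total_ent entries, rounded up
+ * to a power of two.
+ */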
+static inline u32
+bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
+{
+ u32 blks = total_ent / ent_per_blk;
+
+ if (blks == 0 || blks == 1)
+ return ++blks;
+
+ if (!is_power_of_2(blks))
+ blks = roundup_pow_of_two(blks);
+
+ return blks;
+}
+
+#define BNGE_MAX_ROCE_MSIX 64
+#define BNGE_MIN_ROCE_CP_RINGS 2
+#define BNGE_MIN_ROCE_STAT_CTXS 1
+
+#endif /* _BNGE_RESC_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
new file mode 100644
index 000000000000..79f5ce2e5d08
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnge.h"
+#include "bnge_hwrm_lib.h"
+#include "bnge_rmem.h"
+
+static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm,
+ void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNGE_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
+}
+
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ int i;
+
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
+
+ rmem->pg_arr[i] = NULL;
+ }
+skip_pages:
+ if (rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
+ rmem->pg_tbl, rmem->dma_pg_tbl);
+ rmem->pg_tbl = NULL;
+ }
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
+ }
+}
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
+{
+ struct pci_dev *pdev = bd->pdev;
+ u64 valid_bit = 0;
+ int i;
+
+ if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
+ &rmem->dma_pg_tbl,
+ GFP_KERNEL);
+ if (!rmem->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
+ GFP_KERNEL);
+ if (!rmem->pg_arr[i])
+ goto err_free_ring;
+
+ if (rmem->ctx_mem)
+ bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
+ rmem->page_size);
+
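+		/* For multi-page rings, populate the page table and tag the
+		 * next-to-last and last ring PTEs for the hardware.
+		 */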
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNGE_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
+ }
+
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
+ goto err_free_ring;
+ }
+ return 0;
+
+err_free_ring:
+ bnge_free_ring(bd, rmem);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ rmem->page_size = BNGE_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNGE_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG;
+ return bnge_alloc_ring(bd, rmem);
+}
+
+static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnge_ctx_mem_type *ctxm)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return -EINVAL;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
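+	/* Contexts spanning more than one page table use two levels: a root
+	 * table whose entries each reference a full leaf page table.
+	 */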
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ rmem->ctx_mem = ctxm;
+ if (i == (nr_tbls - 1)) {
+ int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
+
+ if (rem)
+ rmem->nr_pages = rem;
+ }
+ rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rmem->ctx_mem = ctxm;
+ rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg);
+ }
+
+ return rc;
+}
+
+static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_pg_info *ctx_pg)
+{
+ struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnge_ctx_pg_info *pg_tbl;
+ struct bnge_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnge_free_ring(bd, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnge_free_ring(bd, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
+static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd,
+ struct bnge_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+
+ return rc;
+}
+
+static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ struct bnge_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+	if (!ena)
+		return 0;
+
+	if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+		last_type = BNGE_CTX_MAX - 1;
+	else
+		last_type = BNGE_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+	for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnge_free_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_info *ctx = bd->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNGE_CTX_V2_MAX; type++) {
+ struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ }
+
+ ctx->flags &= ~BNGE_CTX_FLAG_INITED;
+ kfree(ctx);
+ bd->ctx = NULL;
+}
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+int bnge_alloc_ctx_mem(struct bnge_dev *bd)
+{
+ struct bnge_ctx_mem_type *ctxm;
+ struct bnge_ctx_mem_info *ctx;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
+ u8 pg_lvl = 1;
+ int i, rc;
+
+ rc = bnge_hwrm_func_backing_store_qcaps(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc);
+ return rc;
+ }
+
+ ctx = bd->ctx;
+ if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED))
+ return 0;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
+ if (bnge_is_roce_en(bd) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fast qp destroy feature enabled */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
+ }
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_QP];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_CQ];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STAT];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
+
+ if (!bnge_is_roce_en(bd))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
+
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNGE_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+
+ rc = bnge_backing_store_cfg(bd, ena);
+ if (rc) {
+ dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc);
+ return rc;
+ }
+ ctx->flags |= BNGE_CTX_FLAG_INITED;
+
+ return 0;
+}
+
+void bnge_init_ring_struct(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_tx_ring_info *txr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)nqr->desc_ring;
+ rmem->dma_arr = nqr->desc_mapping;
+ rmem->vmem_size = 0;
+
+ rxr = bnapi->rx_ring;
+ if (!rxr)
+ goto skip_rx;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_buf_ring;
+
+skip_rx:
+ bnge_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
new file mode 100644
index 000000000000..341c7f81ed09
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_RMEM_H_
+#define _BNGE_RMEM_H_
+
+struct bnge_ctx_mem_type;
+struct bnge_dev;
+struct bnge_net;
+
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+struct bnge_ring_mem_info {
+ /* Number of pages to next level */
+ int nr_pages;
+ int page_size;
+ u16 flags;
+#define BNGE_RMEM_VALID_PTE_FLAG 1
+#define BNGE_RMEM_RING_PTE_FLAG 2
+#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
+
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t dma_pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+
+ struct bnge_ctx_mem_type *ctx_mem;
+};
+
+/* The hardware supports certain page sizes.
+ * Use the supported page sizes to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNGE_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNGE_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNGE_PAGE_SHIFT 13
+#else
+#define BNGE_PAGE_SHIFT 16
+#endif
+#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT)
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNGE_RX_PAGE_SHIFT 15
+#else
+#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+
+struct bnge_ctx_pg_info {
+ u32 entries;
+ u32 nr_pages;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnge_ring_mem_info ring_mem;
+ struct bnge_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNGE_MAX_TQM_SP_RINGS 1
+#define BNGE_MAX_TQM_FP_RINGS 8
+#define BNGE_MAX_TQM_RINGS \
+ (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS)
+#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNGE_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNGE_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNGE_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
+#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNGE_CTX_QP \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNGE_CTX_SRQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNGE_CTX_CQ \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNGE_CTX_VNIC \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNGE_CTX_STAT \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNGE_CTX_STQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNGE_CTX_FTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNGE_CTX_MRAV \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNGE_CTX_TIM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNGE_CTX_TCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNGE_CTX_RCK \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNGE_CTX_MTQM \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNGE_CTX_SQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNGE_CTX_RQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNGE_CTX_SRQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNGE_CTX_CQDBS \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNGE_CTX_SRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNGE_CTX_SRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNGE_CTX_CRT_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNGE_CTX_CRT2_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNGE_CTX_RIGP0_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNGE_CTX_L2_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNGE_CTX_ROCE_HWRM_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+
+#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1)
+#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1)
+#define BNGE_CTX_INV ((u16)-1)
+
+#define BNGE_CTX_V2_MAX \
+ (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1)
+
+#define BNGE_BS_CFG_ALL_DONE \
+ FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE
+
+struct bnge_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNGE_CTX_MEM_TYPE_VALID \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNGE_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNGE_MAX_SPLIT_ENTRY];
+ };
+ struct bnge_ctx_pg_info *pg_info;
+};
+
+struct bnge_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+ u32 flags;
+#define BNGE_CTX_FLAG_INITED 0x01
+ struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+struct bnge_ring_struct {
+ struct bnge_ring_mem_info ring_mem;
+
+ u16 fw_ring_id;
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by NQs */
+ };
+ u32 handle;
+ u8 queue_id;
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+void bnge_init_ring_struct(struct bnge_net *bn);
+
+#endif /* _BNGE_RMEM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index babc955ba64e..805daae9dd36 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -48,7 +48,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/crash_dump.h>
#if IS_ENABLED(CONFIG_CNIC)
@@ -176,12 +175,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -368,6 +367,7 @@ static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
@@ -2956,7 +2956,6 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
shinfo = skb_shinfo(skb);
shinfo->nr_frags--;
page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
- __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
cons_rx_pg->page = page;
dev_kfree_skb(skb);
@@ -3045,7 +3044,7 @@ error:
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, 0);
+ skb = slab_build_skb(data);
if (!skb) {
kfree(data);
goto error;
@@ -3829,7 +3828,7 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
return 0;
}
-static int
+static void
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
const struct bnx2_mips_fw_file_entry *fw_entry)
{
@@ -3897,48 +3896,34 @@ load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
val &= ~cpu_reg->mode_value_halt;
bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
-
- return 0;
}
-static int
+static void
bnx2_init_cpus(struct bnx2 *bp)
{
const struct bnx2_mips_fw_file *mips_fw =
(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
const struct bnx2_rv2p_fw_file *rv2p_fw =
(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
- int rc;
/* Initialize the RV2P processor. */
load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
/* Initialize the RX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
/* Initialize the TX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
/* Initialize the TX Patch-up Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
/* Initialize the Completion Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
/* Initialize the Command Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
-
-init_cpu_err:
- return rc;
+ load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
}
static void
@@ -4951,8 +4936,7 @@ bnx2_init_chip(struct bnx2 *bp)
} else
bnx2_init_context(bp);
- if ((rc = bnx2_init_cpus(bp)) != 0)
- return rc;
+ bnx2_init_cpus(bp);
bnx2_init_nvram(bp);
@@ -5415,8 +5399,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_buf_use_size = rx_size;
/* hw alignment + build_skb() overhead*/
- bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
- NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->rx_buf_size = kmalloc_size_roundup(
+ SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
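The hunk above keeps rx_buf_size honest: kmalloc() rounds every request up to the next slab bucket anyway, so recording the kmalloc_size_roundup() result means the tail space used by build_skb()/slab_build_skb() for struct skb_shared_info is memory the driver actually accounts for. A sketch of the idea, with illustrative sizes:

	size_t want = SKB_DATA_ALIGN(1500 + NET_SKB_PAD) +
		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	size_t got  = kmalloc_size_roundup(want);	/* e.g. 1856 -> 2048 */
	void *data  = kmalloc(got, GFP_KERNEL);		/* whole bucket usable */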
@@ -6178,7 +6163,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
static void
bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = from_timer(bp, t, timer);
+ struct bnx2 *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -6415,7 +6400,7 @@ bnx2_open(struct net_device *dev)
rc = bnx2_request_irq(bp);
if (rc) {
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
goto open_err;
}
bnx2_enable_int(bp);
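The timer changes in this file are mechanical renames with unchanged semantics: from_timer() became timer_container_of(), and del_timer_sync() became timer_delete_sync(). The pattern, sketched on a hypothetical driver struct:

struct my_dev {
	struct timer_list timer;
};

static void my_timer_fn(struct timer_list *t)
{
	/* map the timer_list back to its enclosing structure */
	struct my_dev *md = timer_container_of(md, t, timer);

	mod_timer(&md->timer, jiffies + HZ);	/* re-arm */
}

static void my_dev_stop(struct my_dev *md)
{
	timer_delete_sync(&md->timer);	/* waits out a running handler */
}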
@@ -6459,7 +6444,6 @@ bnx2_reset_task(struct work_struct *work)
if (!(pcicmd & PCI_COMMAND_MEMORY)) {
/* in case PCI block has reset */
pci_restore_state(bp->pdev);
- pci_save_state(bp->pdev);
}
rc = bnx2_init_nic(bp, 1);
if (rc) {
@@ -6767,7 +6751,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
netif_tx_disable(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -7042,9 +7026,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
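strscpy() is preferred over strlcpy() here because it never reads the source past the destination size, always NUL-terminates, and reports truncation directly rather than returning the would-be length. In miniature:

	char drv[8];
	ssize_t n = strscpy(drv, "bnx2-very-long-name", sizeof(drv));

	if (n == -E2BIG)
		;	/* truncated, but drv[] is still NUL-terminated */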
@@ -7318,7 +7302,9 @@ static int bnx2_set_coalesce(struct net_device *dev,
}
static void
-bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7389,7 +7375,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
int rc;
@@ -7923,7 +7911,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
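WRITE_ONCE() here pairs with lockless READ_ONCE() readers of dev->mtu elsewhere in the stack; it keeps the store from being torn, duplicated, or re-ordered by the compiler. The pairing, as a fragment:

	WRITE_ONCE(dev->mtu, new_mtu);		/* writer, under RTNL */

	unsigned int mtu = READ_ONCE(dev->mtu);	/* lockless fast-path reader */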
@@ -8088,7 +8076,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
- int err;
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -8171,12 +8158,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_PCIE;
if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
-
- /* AER (Advanced Error Reporting) hooks */
- err = pci_enable_pcie_error_reporting(pdev);
- if (!err)
- bp->flags |= BNX2_FLAG_AER_ENABLED;
-
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
@@ -8212,7 +8193,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
@@ -8455,11 +8436,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
pci_iounmap(pdev, bp->regview);
bp->regview = NULL;
@@ -8518,7 +8494,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
@@ -8625,7 +8601,7 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_work_sync(&bp->reset_task);
pci_iounmap(bp->pdev, bp->regview);
@@ -8633,11 +8609,6 @@ bnx2_remove_one(struct pci_dev *pdev)
bnx2_free_stats_blk(dev);
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
bnx2_release_firmware(bp);
free_netdev(dev);
@@ -8657,7 +8628,7 @@ bnx2_suspend(struct device *device)
cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_shutdown_chip(bp);
__bnx2_free_irq(bp);
bnx2_free_skbs(bp);
@@ -8715,7 +8686,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev)) {
bnx2_netif_stop(bp, true);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
}
@@ -8746,7 +8717,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
err = bnx2_init_nic(bp, 1);
@@ -8761,9 +8731,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
- return result;
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index a09ec47461c9..315b08c64edd 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6808,7 +6808,6 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
-#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2b06d78baa08..9580ab83d387 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1262,7 +1262,7 @@ enum {
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[FP_SB_MAX_E1x+
+ struct stats_query_entry query[FP_SB_MAX_E2 +
BNX2X_FIRST_QUEUE_QUERY_IDX];
};
@@ -1271,7 +1271,7 @@ struct bnx2x_fw_stats_data {
struct per_port_stats port;
struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[];
};
/* Public slow path states */
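Replacing the one-element array with a true flexible array member lets the compiler and fortify checks see the real bounds of queue_stats[]. Allocation then sizes the trailing array explicitly, typically with struct_size(); a sketch, not this driver's code:

	struct bnx2x_fw_stats_data *data;

	/* fixed part plus n trailing per-queue entries, overflow-checked */
	data = kzalloc(struct_size(data, queue_stats, n), GFP_KERNEL);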
@@ -1486,7 +1486,6 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define BC_SUPPORTS_RMMOD_CMD (1 << 23)
#define HAS_PHYS_PORT_ID (1 << 24)
-#define AER_ENABLED (1 << 25)
#define PTP_SUPPORTED (1 << 26)
#define TX_TIMESTAMPING_EN (1 << 27)
@@ -1509,6 +1508,8 @@ struct bnx2x {
bool cnic_loaded;
struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
+ bool nic_stopped;
+
/* Flag that indicates that we can start looking for FCoE L2 queue
* completions in the default status block.
*/
@@ -1850,6 +1851,14 @@ struct bnx2x {
/* Vxlan/Geneve related information */
u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2525,5 +2534,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e8c2d593c5..e59530357e2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -43,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -54,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -148,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
@@ -673,6 +673,18 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
+static struct sk_buff *
+bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
+{
+ struct sk_buff *skb;
+
+ if (fp->rx_frag_size)
+ skb = build_skb(data, fp->rx_frag_size);
+ else
+ skb = slab_build_skb(data);
+ return skb;
+}
+
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
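The new bnx2x_build_skb() keys off fp->rx_frag_size the same way bnx2x_frag_free() does: a non-zero value means the buffer is a page fragment sized for build_skb(), zero means it came from kmalloc() and needs slab_build_skb(). The allocation-side counterpart of that policy, sketched with a hypothetical name:

static void *rx_buf_alloc_sketch(const struct bnx2x_fastpath *fp, gfp_t gfp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);	/* page frag */
	return kmalloc(fp->rx_buf_size, gfp);			/* slab buffer */
}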
@@ -780,7 +792,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_data))
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
@@ -788,6 +800,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
@@ -1046,7 +1059,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (unlikely(!skb)) {
bnx2x_frag_free(fp, data);
bnx2x_fp_qstats(bp, fp)->
@@ -1923,8 +1936,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
/* Skip VLAN tag if present */
if (ether_type == ETH_P_8021Q) {
- struct vlan_ethhdr *vhdr =
- (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
}
@@ -2363,26 +2375,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build my FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
/* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
- loaded_fw, my_fw);
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
if (print_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
- BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
- loaded_fw, my_fw);
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
return -EBUSY;
}
}
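The rewritten check compares the version fields individually, so a firmware with the same major/minor/engineering numbers but a newer revision no longer trips the bit-exact dword comparison. The byte layout is the one the old my_fw packing used:

	u32 fw = 0x00150d07;		/* example: 7.13.21.0 */
	u8 major = fw & 0xff;		/* 0x07 = 7  */
	u8 minor = (fw >> 8) & 0xff;	/* 0x0d = 13 */
	u8 rev   = (fw >> 16) & 0xff;	/* 0x15 = 21 */
	u8 eng   = fw >> 24;		/* 0x00 = 0  */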
@@ -2700,6 +2716,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_add_all_napi(bp);
DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
+ bp->nic_stopped = false;
if (IS_PF(bp)) {
/* set pf load just before approaching the MCP */
@@ -2945,6 +2962,7 @@ load_error2:
load_error1:
bnx2x_napi_disable(bp);
bnx2x_del_all_napi(bp);
+ bp->nic_stopped = true;
/* clear pf_load status, as it was already set */
if (IS_PF(bp))
@@ -3041,7 +3059,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->rx_mode = BNX2X_RX_MODE_NONE;
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
@@ -3080,14 +3098,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (!CHIP_IS_E1x(bp))
bnx2x_pf_disable(bp);
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 1);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
- /* Release IRQs */
- bnx2x_free_irq(bp);
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, false);
@@ -3416,12 +3437,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3520,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3529,15 +3547,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3554,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3563,12 +3579,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
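skb_tcp_all_headers() and skb_inner_tcp_all_headers() fold the open-coded pointer arithmetic in these hunks into named helpers; to the best of my knowledge their definitions are essentially:

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}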
@@ -3597,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3605,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3651,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3698,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
@@ -4888,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d8b1824c334d..0bc1367fd649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 17ae6df90723..9af81630c8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -344,7 +344,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
-/* maps unmapped priorities to to the same COS as L2 */
+/* maps unmapped priorities to the same COS as L2 */
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
{
int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 472a3a478038..3d853eeb976f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%s]: rx_ucast_packets" },
+ 8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%s]: rx_mcast_packets" },
+ 8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%s]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ 8, "[%d]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%s]: rx_phy_ip_err_discards"},
+ 4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%s]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
- { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
- { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ 4, "[%d]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%s]: tx_ucast_packets" },
+ 8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%s]: tx_mcast_packets" },
+ 8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" },
+ 8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
- 8, "[%s]: tpa_aggregations" },
+ 8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
- 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ 8, "[%d]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
- 4, "[%s]: driver_filtered_tx_pkt" }
+ 4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,16 +1126,16 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1243,9 +1243,9 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* pf B succeeds in taking the same lock since they are from the same port.
* pf A takes the per pf misc lock. Performs eeprom access.
* pf A finishes. Unlocks the per pf misc lock.
- * Pf B takes the lock and proceeds to perform it's own access.
+ * Pf B takes the lock and proceeds to perform its own access.
* pf A unlocks the per port lock, while pf B is still working (!).
- * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
* access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
@@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev,
}
static void bnx2x_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev,
}
static int bnx2x_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -2077,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
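ethtool_keee replaces the legacy u32 ADVERTISED_* words with link-mode bitmaps, which is why the conversion helpers above now take an unsigned long *mode. Manipulating such a mask, in sketch form:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(mode);

	linkmode_zero(mode);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
		;	/* 1G EEE advertised */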
@@ -2116,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2137,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2158,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
@@ -3182,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, start;
- char queue_name[MAX_QUEUE_NAME_LEN+1];
+ const char *str;
+ int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
- k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- memset(queue_name, 0, sizeof(queue_name));
- snprintf(queue_name, sizeof(queue_name),
- "%d", i);
- for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- snprintf(buf + (k + j)*ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string,
- queue_name);
- k += BNX2X_NUM_Q_STATS;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ str = bnx2x_q_stats_arr[j].string;
+ ethtool_sprintf(&buf, str, i);
+ }
}
}
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
+ ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
+ if (IS_VF(bp))
+ break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
- memcpy(buf, bnx2x_tests_str_arr + start,
- ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
+ ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, bnx2x_private_arr,
- ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
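ethtool_sprintf() and ethtool_puts() write into the current ETH_GSTRING_LEN slot and advance the cursor themselves, which is what lets the manual k/j index bookkeeping above disappear. In miniature:

	u8 *p = buf;

	ethtool_sprintf(&p, "[%d]: rx_bytes", queue_id);	/* formatted slot */
	ethtool_puts(&p, "driver_filtered_tx_pkt");		/* literal slot */
	/* p now points at the next ETH_GSTRING_LEN-sized slot */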
@@ -3322,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3356,29 +3355,22 @@ static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
return 0;
}
-static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 bnx2x_get_rx_ring_count(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = BNX2X_NUM_ETH_QUEUES(bp);
- return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return BNX2X_NUM_ETH_QUEUES(bp);
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3464,34 +3456,20 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnx2x_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
/* Get the current configuration of the RSS indirection table */
@@ -3507,13 +3485,14 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
* queue.
*/
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
- indir[i] = ind_table[i] - bp->fp->cl_id;
+ rxfh->indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnx2x_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
@@ -3521,11 +3500,12 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
@@ -3538,7 +3518,7 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
* align the received table to the Client ID of the leading RSS
* queue
*/
- bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = rxfh->indir[i] + bp->fp->cl_id;
}
if (bp->state == BNX2X_STATE_OPEN)
@@ -3631,22 +3611,18 @@ static int bnx2x_set_channels(struct net_device *dev,
}
static int bnx2x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnx2x *bp = netdev_priv(dev);
if (bp->flags & PTP_SUPPORTED) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (bp->ptp_clock)
info->phc_index = ptp_clock_index(bp->ptp_clock);
- else
- info->phc_index = -1;
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
@@ -3690,11 +3666,12 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3717,11 +3694,12 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 3f8435208bf4..9221942290a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -241,6 +241,8 @@
IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
@@ -330,7 +332,7 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* microcode fixed page page size 4K (chains and ring segments) */
+/* microcode fixed page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Number of indices per slow-path SB */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 622fadc50316..611efee75834 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 15
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 1835d2e451c0..fc7fce642666 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
- struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+ struct ilt_client_info *ilt_cli;
if (!ilt || !ilt->lines)
return -1;
+ ilt_cli = &ilt->clients[cli_num];
+
if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4e85e7dbc2be..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,12 +6173,13 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
*len -= ret;
return 0;
}
@@ -6187,13 +6188,14 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
@@ -6206,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11544,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
@@ -13842,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index aec666e97683..6a1cc2032bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -74,9 +73,19 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -90,6 +99,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
@@ -285,7 +297,7 @@ const u32 dmae_reg_go_c[] = {
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
-static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static DEFINE_SEMAPHORE(bnx2x_prev_sem, 1);
static LIST_HEAD(bnx2x_prev_list);
/* Forward declaration */
@@ -296,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -747,9 +762,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -1758,7 +1771,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
* @bp: driver handle
*
* Returns the recovery leader resource id according to the engine this function
- * belongs to. Currently only only 2 engines is supported.
+ * belongs to. Currently only 2 engines is supported.
*/
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
@@ -3374,7 +3387,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
@@ -5773,7 +5786,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = from_timer(bp, t, timer);
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -9464,15 +9477,18 @@ unload_error:
}
}
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 1);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
- /* Release IRQs */
- bnx2x_free_irq(bp);
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
/* Reset the chip, unless PCI function is offline. If we reach this
* point following a PCI error handling, it means device is really
@@ -10206,8 +10222,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -12801,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
static int bnx2x_validate_addr(struct net_device *dev)
@@ -13024,29 +13034,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
-static int bnx2x_set_coherency_mask(struct bnx2x *bp)
-{
- struct device *dev = &bp->pdev->dev;
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "System does not support DMA, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
-{
- if (bp->flags & AER_ENABLED) {
- pci_disable_pcie_error_reporting(bp->pdev);
- bp->flags &= ~AER_ENABLED;
- }
-}
-
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -13116,9 +13107,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
goto err_out_release;
}
- rc = bnx2x_set_coherency_mask(bp);
- if (rc)
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
goto err_out_release;
+ }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -13157,13 +13150,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* Set PCIe reset type to fundamental for EEH recovery */
pdev->needs_freset = 1;
- /* AER (Advanced Error reporting) configuration */
- rc = pci_enable_pcie_error_reporting(pdev);
- if (!rc)
- bp->flags |= AER_ENABLED;
- else
- BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
-
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -13317,16 +13303,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13406,32 +13387,49 @@ do { \
static int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13659,19 +13657,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
return bnx2x_func_state_change(bp, &func_params);
}
-static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
int rc;
int drift_dir = 1;
int val, period, period1, period2, dif, dif1, dif2;
int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
- DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+ DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb);
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
- "PTP adjfreq called while the interface is down\n");
+ "PTP adjfine called while the interface is down\n");
return -ENETDOWN;
}
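The .adjfreq to .adjfine conversion moves the driver to the finer-grained scaled-ppm interface, with scaled_ppm_to_ppb() recovering the legacy parts-per-billion value. One scaled ppm is 2^-16 ppm, so ppb = scaled_ppm * 1000 / 2^16 = (scaled_ppm * 125) >> 13; for example 65536 scaled ppm (exactly 1 ppm) yields 1000 ppb. A sketch of the arithmetic (the in-kernel helper rounds slightly differently):

static long scaled_ppm_to_ppb_sketch(long scaled_ppm)
{
	/* 1000 / 65536 == 125 / 8192; widen before multiplying */
	return (long)(((s64)scaled_ppm * 125) >> 13);
}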
@@ -13806,7 +13805,7 @@ void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock_info.n_ext_ts = 0;
bp->ptp_clock_info.n_per_out = 0;
bp->ptp_clock_info.pps = 0;
- bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
@@ -14007,8 +14006,6 @@ init_one_freemem:
bnx2x_free_mem_bp(bp);
init_one_exit:
- bnx2x_disable_pcie_error_reporting(bp);
-
if (bp->regview)
iounmap(bp->regview);
@@ -14089,7 +14086,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
@@ -14141,13 +14137,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
@@ -14224,7 +14216,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
@@ -14248,8 +14239,16 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
}
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
- bnx2x_netif_stop(bp, 1);
- bnx2x_free_irq(bp);
+ if (!bp->nic_stopped) {
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, true);
@@ -14299,11 +14298,16 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
- if (netif_running(dev))
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (netif_running(dev)) {
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ netdev_err(bp->dev, "Error during driver initialization, try unloading/reloading the driver\n");
+ goto done;
+ }
+ }
netif_device_attach(dev);
+done:
rtnl_unlock();
}
@@ -14906,9 +14910,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+ cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
@@ -15168,7 +15174,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
}
/* Read the PHC */
-static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(struct cyclecounter *cc)
{
struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
int port = BP_PORT(bp);
@@ -15343,36 +15349,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
}
bp->hwtstamp_ioctl_called = true;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
+
+ return 0;
}
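With the move to kernel_hwtstamp_config, the core now copies struct hwtstamp_config to and from user space itself; the driver only validates and applies the request, reporting failures through extack. The callbacks are reached via net_device_ops instead of ndo_eth_ioctl; a sketch of the hookup (assuming the standard fields, not shown in this hunk):

	static const struct net_device_ops bnx2x_netdev_ops_sketch = {
		/* ... existing callbacks ... */
		.ndo_hwtstamp_set	= bnx2x_hwtstamp_set,
		.ndo_hwtstamp_get	= bnx2x_hwtstamp_get,
	};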
/* Configures HW for PTP */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b41b73..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
@@ -2212,7 +2214,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2383,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2495,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2531,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -6218,7 +6220,7 @@
#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 542c69822649..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -890,7 +890,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
@@ -1075,7 +1075,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
/* Reset the ramrod data buffer for the first rule */
@@ -1125,7 +1125,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
u16 inner_mac;
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
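crc32c_le() and crc32c() compute the same little-endian CRC32C; the rename simply drops the redundant alias. The multicast bin is the top byte of that checksum over the station address, e.g.:

	#include <linux/crc32c.h>
	#include <linux/if_ether.h>

	/* Bin index = most-significant byte of CRC32C(MAC). */
	static u8 mcast_bin(const u8 mac[ETH_ALEN])
	{
		return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
	}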
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index bacc8552bce1..00ca861c80dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -379,7 +379,7 @@ struct bnx2x_vlan_mac_obj {
/**
* Delete all configured elements having the given
* vlan_mac_flags specification. Assumes no pending for
- * execution commands. Will schedule all all currently
+ * execution commands. Will schedule all currently
* configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
* specification for deletion and will use the given
* ramrod_flags for the last DEL operation.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 74a8931ce1d1..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
/* set the VF-PF association in the FW */
- storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
- storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
/* clear vf errors*/
bnx2x_vf_semi_clear_err(bp, abs_vfid);
@@ -786,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
- struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
- if (dev)
- return bnx2x_is_pcie_pending(dev);
- return false;
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
}
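pci_get_domain_bus_and_slot() returns the struct pci_dev with its reference count raised, so the old early-return style leaked a reference on every successful lookup. The balanced pattern (lookup arguments and the check are illustrative):

	struct pci_dev *dev;
	bool pending = false;

	dev = pci_get_domain_bus_and_slot(domain, bus, devfn);
	if (dev) {
		pending = device_has_pending_transaction(dev);
		pci_dev_put(dev);	/* drop the lookup's reference */
	}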
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
@@ -2639,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so its in bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c2cf5519787..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
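strscpy() improves on strlcpy() in two ways: it never reads the source beyond what fits in the destination (strlcpy() walked the whole source to compute its return value, which is unsafe for possibly unterminated strings such as a firmware-supplied version), and it returns -E2BIG on truncation. Usage sketch (buffer names illustrative):

	char buf[16];
	ssize_t n;

	n = strscpy(buf, fw_ver, sizeof(buf));	/* always NUL-terminates */
	if (n == -E2BIG)
		pr_warn("fw_ver truncated\n");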
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
@@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0b193edb73b8..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
@@ -849,7 +847,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
memcpy(old, new, sizeof(struct nig_stats));
- memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
@@ -1634,9 +1633,9 @@ void bnx2x_stats_init(struct bnx2x *bp)
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
if (!CHIP_IS_E3(bp)) {
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
}
/* Prepare statistics ramrod data */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d55e63692cf3..ae93c078707b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -36,10 +36,14 @@ struct nig_stats {
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
- u32 egress_mac_pkt0_lo;
- u32 egress_mac_pkt0_hi;
- u32 egress_mac_pkt1_lo;
- u32 egress_mac_pkt1_hi;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
};
enum bnx2x_stats_event {
@@ -83,6 +87,7 @@ struct bnx2x_eth_stats {
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
+ struct_group(shared,
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
@@ -159,6 +164,7 @@ struct bnx2x_eth_stats {
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
+ );
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
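struct_group() (from <linux/stddef.h>) wraps a run of members in an anonymous union alongside a named struct mirror, so a memcpy() spanning those members addresses one named object instead of appearing to overflow the first field under FORTIFY_SOURCE; the BUILD_BUG_ON() added in bnx2x_hw_stats_update() ties the group's size to the source. A minimal sketch:

	#include <linux/stddef.h>

	struct eth_stats_sketch {
		struct_group(shared,
			u32 a_hi;
			u32 a_lo;
		);
		u32 not_copied;
	};

	static void fill(struct eth_stats_sketch *s, const void *hw)
	{
		BUILD_BUG_ON(sizeof(s->shared) != 2 * sizeof(u32));
		memcpy(&s->shared, hw, sizeof(s->shared));	/* bounds-checked */
	}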
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..8946a931e87e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
@@ -529,13 +529,16 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
bnx2x_vfpf_finalize(bp, &req->first_tlv);
free_irq:
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 0);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
+ if (!bp->nic_stopped) {
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+ bp->nic_stopped = true;
+ }
}
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index c6ef7ec2c115..ba6c239d52fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
+bnxt_en-$(CONFIG_BNXT_HWMON) += bnxt_hwmon.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 66263aa0d96b..d17d0ea89c36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -47,18 +48,18 @@
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/bitmap.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
+#include <linux/align.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -71,69 +72,23 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
+#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
-enum board_idx {
- BCM57301,
- BCM57302,
- BCM57304,
- BCM57417_NPAR,
- BCM58700,
- BCM57311,
- BCM57312,
- BCM57402,
- BCM57404,
- BCM57406,
- BCM57402_NPAR,
- BCM57407,
- BCM57412,
- BCM57414,
- BCM57416,
- BCM57417,
- BCM57412_NPAR,
- BCM57314,
- BCM57417_SFP,
- BCM57416_SFP,
- BCM57404_NPAR,
- BCM57406_NPAR,
- BCM57407_SFP,
- BCM57407_NPAR,
- BCM57414_NPAR,
- BCM57416_NPAR,
- BCM57452,
- BCM57454,
- BCM5745x_NPAR,
- BCM57508,
- BCM57504,
- BCM57502,
- BCM57508_NPAR,
- BCM57504_NPAR,
- BCM57502_NPAR,
- BCM58802,
- BCM58804,
- BCM58808,
- NETXTREME_E_VF,
- NETXTREME_C_VF,
- NETXTREME_S_VF,
- NETXTREME_C_VF_HV,
- NETXTREME_E_VF_HV,
- NETXTREME_E_P5_VF,
- NETXTREME_E_P5_VF_HV,
-};
-
-/* indexed by enum above */
+/* indexed by enum board_idx */
static const struct {
char *name;
} board_info[] = {
@@ -169,6 +124,10 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
+ [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
+ [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
@@ -182,6 +141,8 @@ static const struct {
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
+ [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -223,12 +184,16 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
- { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
+ { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
+ { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
+ { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -252,6 +217,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
+ { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -282,42 +249,106 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+ ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
+};
+
+const u16 bnxt_bstore_to_trace[] = {
+ [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
+ [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
+ [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
+ [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
+ [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
+ [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
+ [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
+ [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
+ [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
+ [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
+ [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
+ [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
+ [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
+ idx == NETXTREME_E_P7_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
-#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-
-#define BNXT_CP_DB_IRQ_DIS(db) \
- writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
- writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
(db)->doorbell)
+#define BNXT_DB_NQ_P7(db, idx) \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
#define BNXT_DB_CQ_ARM(db, idx) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
- (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ BNXT_DB_NQ_P7(db, idx);
+ else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_P5(db, idx);
else
BNXT_DB_CQ(db, idx);
@@ -325,7 +356,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_ARM_P5(db, idx);
else
BNXT_DB_CQ_ARM(db, idx);
@@ -333,13 +364,67 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
- RING_CMP(idx), db->doorbell);
+ DB_RING_IDX(db, idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
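RING_CMP(idx) gives way to DB_RING_IDX(db, idx) because on P5-plus and P7 chips the doorbell value must carry a ring-relative index while the driver keeps free-running 32-bit counters. The idea, as a sketch (the real macro lives in bnxt.h; the field name is assumed here):

	/* Ring sizes are powers of two, so modulo is a mask. */
	#define DB_RING_IDX_SKETCH(db, idx)	((idx) & (db)->db_ring_mask)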
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+static void __bnxt_queue_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
+static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
+{
+ set_bit(event, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+}
+
+static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (!rxr->bnapi->in_reset) {
+ rxr->bnapi->in_reset = true;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+ }
+ rxr->rx_next_cons = 0xffff;
+}
+
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 curr)
+{
+ struct bnxt_napi *bnapi = txr->bnapi;
+
+ if (bnapi->tx_fault)
+ return;
+
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
+ WARN_ON_ONCE(1);
+ bnapi->tx_fault = 1;
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
+}
+
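bnxt_queue_sp_work() now takes the event bit, keeping the set_bit()/kick pairing in one place; this is the standard cheap deferral from hot paths such as NAPI (names illustrative):

	set_bit(MY_EVENT, &bp->sp_event);	/* atomic, safe in softirq */
	queue_work(wq, &bp->sp_task);		/* queueing while already
						 * pending coalesces */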
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -375,50 +460,34 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
+ /* Sync BD data before updating doorbell */
+ wmb();
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
-static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
- struct bnxt_tx_ring_info *txr,
- struct netdev_queue *txq)
-{
- netif_tx_stop_queue(txq);
-
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
- netif_tx_wake_queue(txq);
- return false;
- }
-
- return true;
-}
-
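netif_txq_try_stop() from <net/netdev_queues.h> replaces the open-coded helper above with the same stop-then-recheck protocol: stop the queue, issue a full memory barrier so the descriptor-space re-read cannot pass the stop, and re-wake if a racing completion just freed room. The underlying pattern:

	netif_tx_stop_queue(txq);
	smp_mb();	/* pairs with the barrier on the completion side */
	if (tx_avail(txr) >= wake_thresh) {	/* tx_avail() illustrative */
		netif_tx_wake_queue(txq);
		/* space appeared after all; keep transmitting */
	}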
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- struct tx_bd *txbd;
+ struct tx_bd *txbd, *txbd0;
struct tx_bd_ext *txbd1;
struct netdev_queue *txq;
int i;
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
- u16 prod, last_frag;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
+ u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -426,25 +495,38 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
- if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh))
return NETDEV_TX_BUSY;
}
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_free;
+
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
- txbd->tx_bd_opaque = prod;
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
@@ -460,28 +542,34 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+ ptp->tx_tstamp_en) {
+ if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else if (!skb_is_gso(skb)) {
+ u16 seq_id, hdr_off;
- if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
- atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
- &ptp->tx_hdr_off)) {
+ if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+ !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
- ptp->tx_hdr_off += VLAN_HLEN;
+ hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- atomic_inc(&bp->ptp_cfg->tx_avail);
+
+ ptp->txts_req[txts_prod].tx_seqid = seq_id;
+ ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+ tx_buf->txts_prod = txts_prod;
}
}
}
-
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -497,7 +585,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -516,9 +604,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -529,13 +617,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
txbd->tx_bd_haddr = txr->data_mapping;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
tx_push->doorbell =
- cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
- txr->tx_prod = prod;
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
+ DB_RING_IDX(&txr->tx_db, prod));
+ WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
@@ -569,25 +660,32 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
u32 hdr_len;
- if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
- else
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ sizeof(struct udphdr);
+ } else {
+ hdr_len = skb_tcp_all_headers(skb);
+ }
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -614,11 +712,11 @@ normal_tx:
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
@@ -627,8 +725,9 @@ normal_tx:
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
- tx_buf = &txr->tx_buf_ring[prod];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -645,36 +744,38 @@ normal_tx:
skb_tx_timestamp(skb);
- /* Sync BD data before updating doorbell */
- wmb();
-
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
- if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
bnxt_txr_db_kick(bp, txr, prod);
- else
+ } else {
+ if (free_size >= bp->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
txr->kick_pending = 1;
+ }
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (netdev_xmit_more() && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
bnxt_txr_db_kick(bp, txr, prod);
+ }
- bnxt_txr_netif_try_stop_queue(bp, txr, txq);
+ netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh);
}
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
@@ -682,41 +783,69 @@ tx_dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
- tx_buf = &txr->tx_buf_ring[prod];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags)) {
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
+ atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ /* set SKB to err so PTP worker will clean up */
+ ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+ }
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
- txr->tx_buf_ring[txr->tx_prod].skb = NULL;
- atomic_long_inc(&dev->tx_dropped);
+ txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+/* Returns true if some remaining TX packets not processed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
- u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
- int i;
+ u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
+ u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
+ int tx_pkts = 0;
+ bool rc = false;
- for (i = 0; i < nr_pkts; i++) {
+ while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
+ bool is_ts_pkt;
int j, last;
- tx_buf = &txr->tx_buf_ring[cons];
- cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
skb = tx_buf->skb;
+
+ if (unlikely(!skb)) {
+ bnxt_sched_reset_txr(bp, txr, cons);
+ return rc;
+ }
+
+ is_ts_pkt = tx_buf->is_ts_pkt;
+ if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+ rc = true;
+ break;
+ }
+
+ cons = NEXT_TX(cons);
+ tx_pkts++;
+ tx_bytes += skb->len;
tx_buf->skb = NULL;
+ tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -728,107 +857,134 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
- tx_buf = &txr->tx_buf_ring[cons];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
- }
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
- else
- atomic_inc(&bp->ptp_cfg->tx_avail);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
+ }
+ if (unlikely(is_ts_pkt)) {
+ if (BNXT_CHIP_P5(bp)) {
+ /* PTP worker takes ownership of the skb */
+ bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+ skb = NULL;
}
}
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
- netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
- txr->tx_cons = cons;
+ WRITE_ONCE(txr->tx_cons, cons);
- /* Need to make the tx_cons update visible to bnxt_start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that bnxt_start_xmit()
- * will miss it and cause the queue to be stopped forever.
- */
- smp_mb();
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
+ bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
+ READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+ return rc;
+}
- if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
- READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_tx_ring_info *txr;
+ bool more = false;
+ int i;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
+ more |= __bnxt_tx_int(bp, txr, budget);
+ }
+ if (!more)
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
gfp_t gfp)
{
- struct device *dev = &bp->pdev->dev;
struct page *page;
- page = page_pool_dev_alloc_pages(rxr->page_pool);
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+ BNXT_RX_PAGE_SIZE);
+ } else {
+ page = page_pool_dev_alloc_pages(rxr->page_pool);
+ *offset = 0;
+ }
if (!page)
return NULL;
- *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(dev, *mapping)) {
- page_pool_recycle_direct(rxr->page_pool, page);
- return NULL;
- }
- *mapping += bp->rx_dma_offset;
+ *mapping = page_pool_get_dma_addr(page) + *offset;
return page;
}
-static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
- u8 *data;
- struct pci_dev *pdev = bp->pdev;
+ unsigned int offset;
+ struct page *page;
- data = kmalloc(bp->rx_buf_size, gfp);
- if (!data)
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bp->rx_buf_size, gfp);
+ if (!page)
return NULL;
- *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- if (dma_mapping_error(&pdev->dev, *mapping)) {
- kfree(data);
- data = NULL;
- }
- return data;
+ *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
+ return page_address(page) + offset;
}
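__bnxt_alloc_rx_frag() replaces the kmalloc() + dma_map_single() pair: the head page pool hands out sub-page fragments that are already DMA-mapped, removing per-buffer map/unmap from the fast path. A self-contained sketch of the allocation side:

	#include <net/page_pool/helpers.h>

	static void *alloc_rx_frag(struct page_pool *pool, unsigned int size,
				   dma_addr_t *dma)
	{
		unsigned int offset;
		struct page *page;

		page = page_pool_alloc_frag(pool, &offset, size, GFP_ATOMIC);
		if (!page)
			return NULL;

		*dma = page_pool_get_dma_addr(page) + offset;
		return page_address(page) + offset;	/* CPU view */
	}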
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
- struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
+ unsigned int offset;
struct page *page =
- __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+ __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
- rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+ rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
if (!data)
return -ENOMEM;
@@ -846,9 +1002,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt *bp = rxr->bnapi->bp;
struct rx_bd *cons_bd, *prod_bd;
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
@@ -856,8 +1013,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
@@ -872,56 +1029,29 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
- &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct pci_dev *pdev = bp->pdev;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
- }
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
-
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
- }
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
__set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
- rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -961,15 +1091,15 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
bool p5_tpa = false;
u32 i;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -986,27 +1116,59 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod);
- sw_prod = NEXT_RX_AGG(sw_prod);
+ sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ bp->rx_dir);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ if (!skb) {
+ page_pool_recycle_direct(rxr->page_pool, page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
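Both receive paths now keep buffers under page-pool control end to end: napi_build_skb() wraps the existing buffer without copying, and skb_mark_for_recycle() makes freeing the skb return the backing page to the pool instead of the page allocator. The shape of the pattern (sizes illustrative):

	skb = napi_build_skb(buf, truesize);	/* buf needs shared_info tailroom */
	if (skb) {
		skb_mark_for_recycle(skb);	/* recycle via page pool on free */
		skb_reserve(skb, headroom);
		__skb_put(skb, len);
	}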
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -1027,21 +1189,21 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ bp->rx_dir);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
- __free_page(page);
+ page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
- skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+ skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1070,40 +1232,49 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, 0);
- dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ skb = napi_build_skb(data, bp->rx_buf_size);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir);
if (!skb) {
- kfree(data);
+ page_pool_free_va(rxr->head_pool, data, true);
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, offset_and_len & 0xffff);
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1114,52 +1285,97 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
-
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
- unsigned int nr_frags;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- shinfo = skb_shinfo(skb);
- nr_frags = --shinfo->nr_frags;
- __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+ if (xdp && netmem_is_pfmemalloc(netmem))
+ xdp_buff_set_frag_pfmemalloc(xdp);
- dev_kfree_skb(skb);
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
- cons_rx_buf->page = page;
+ --shinfo->nr_frags;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
- dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
-
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
+ if (!total_frag_len) {
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
return skb;
}
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
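For XDP, aggregation fragments are published through the skb_shared_info that lives in the buffer's tailroom, and the frags flag tells programs and the later skb conversion that they exist. The essential steps (counts illustrative):

	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);

	/* fill shinfo->frags[0..n-1], then publish */
	xdp_buff_set_frags_flag(xdp);
	shinfo->nr_frags = n;
	shinfo->xdp_frags_size = total_len;	/* bytes across all frags */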
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1173,9 +1389,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
-static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
- unsigned int len,
- dma_addr_t mapping)
+static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1185,16 +1401,49 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ return bnxt_copy_data(bnapi, data, len, mapping);
+}
+
+static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
+ struct xdp_buff *xdp,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ unsigned int metasize = 0;
+ u8 *data = xdp->data;
+ struct sk_buff *skb;
+
+ len = xdp->data_end - xdp->data_meta;
+ metasize = xdp->data - xdp->data_meta;
+ data = xdp->data_meta;
+
+ skb = bnxt_copy_data(bnapi, data, len, mapping);
+ if (!skb)
+ return skb;
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
return skb;
}
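For reference, the buffer layout bnxt_copy_xdp() handles when a program has reserved metadata:

	/*
	 *   data_meta           data             data_end
	 *      |<-- metasize --->|<--- packet --->|
	 *
	 * skb_metadata_set() records metasize; __skb_pull() leaves
	 * skb->data at the packet start while the metadata remains
	 * readable at skb->data - metasize.
	 */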
@@ -1214,7 +1463,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
agg_bufs = TPA_END_AGG_BUFS(tpa_end);
@@ -1228,38 +1477,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
-static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
-{
- if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
- return;
-
- if (BNXT_PF(bp))
- queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
- else
- schedule_delayed_work(&bp->fw_reset_task, delay);
-}
-
-static void bnxt_queue_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp))
- queue_work(bnxt_pf_wq, &bp->sp_task);
- else
- schedule_work(&bp->sp_task);
-}
-
-static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- if (!rxr->bnapi->in_reset) {
- rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- else
- set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- rxr->rx_next_cons = 0xffff;
-}
-
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
@@ -1287,8 +1504,39 @@ static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
return map->agg_id_tbl[agg_id];
}
+static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
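A worked example of the metadata packing above and how the delivery path in bnxt_tpa_end() unpacks it; the TCI value is illustrative:

/* Packing/unpacking of the 32-bit TPA VLAN metadata word: protocol in
 * the upper 16 bits, TCI in the lower 16. */
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

int main(void)
{
	uint32_t vlan_proto = ETH_P_8021Q;
	uint16_t tci = 100;			/* VLAN ID 100 */
	uint32_t metadata = vlan_proto << 16 | tci;

	/* The delivery path reverses the packing: */
	uint16_t proto = metadata >> 16;
	uint16_t vtag  = metadata & 0xffff;

	printf("proto=0x%04x vtag=%u\n", (unsigned)proto, (unsigned)vtag);
	return 0;
}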
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- struct rx_tpa_start_cmp *tpa_start,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -1297,7 +1545,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_bd *prod_bd;
dma_addr_t mapping;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
} else {
@@ -1306,7 +1554,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons ||
@@ -1314,20 +1562,16 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
cons, rxr->rx_next_cons,
TPA_START_ERROR_CODE(tpa_start1));
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
return;
}
- /* Store cfa_code in tpa_info to use in tpa_end
- * completion processing.
- */
- tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
prod_rx_buf->mapping = mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1340,12 +1584,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
RX_TPA_START_CMP_LEN_SHIFT;
if (likely(TPA_START_HASH_VALID(tpa_start))) {
- u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
-
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
+ else if (!BNXT_CHIP_P4_PLUS(bp) &&
+ TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1355,13 +1600,16 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
- tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
- cons = NEXT_RX(cons);
- rxr->rx_next_cons = NEXT_RX(cons);
+ cons = RING_RX(bp, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -1560,7 +1808,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
else
payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
@@ -1578,7 +1826,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -1591,6 +1839,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
u8 *data_ptr, agg_bufs;
unsigned int len;
struct bnxt_tpa_info *tpa_info;
@@ -1608,7 +1857,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_END_AGG_ID_P5(tpa_end);
agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
@@ -1651,21 +1900,22 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
} else {
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
+ GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
@@ -1673,38 +1923,40 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, 0);
- dma_unmap_single_attrs(&bp->pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ skb = napi_build_skb(data, bp->rx_buf_size);
+ dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
if (!skb) {
- kfree(data);
+ page_pool_free_va(rxr->head_pool, data, true);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
			/* Page reuse already handled by bnxt_rx_agg_netmems_skb(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
}
- skb->protocol =
- eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
+ if (tpa_info->cfa_code_valid)
+ dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
+ skb->protocol = eth_type_trans(skb, dev);
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
__be16 vlan_proto = htons(tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1745,6 +1997,8 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
+ skb_mark_for_recycle(skb);
+
if (skb->dev != bp->dev) {
/* this packet belongs to a vf-rep */
bnxt_vf_rep_rx(bp, skb);
@@ -1754,6 +2008,80 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
napi_gro_receive(&bnapi->napi, skb);
}
+static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags,
+ struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts)
+{
+ u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+
+ if (BNXT_PTP_RX_TS_VALID(flags))
+ goto ts_valid;
+ if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags))
+ return false;
+
+ts_valid:
+ *cmpl_ts = ts;
+ return true;
+}
+
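The validity test above in miniature: a per-packet PTP timestamp flag always wins, while the timestamp-all-packets mode additionally needs a nonzero timestamp and its own valid bit. The flag bits below are hypothetical, not the real completion-record layout:

/* Sketch of the two-tier timestamp validity check (hypothetical bits). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTP_RX_TS_VALID(f)  ((f) & 0x1)
#define ALL_RX_TS_VALID(f)  ((f) & 0x2)

static bool rx_ts_valid(bool all_rx_tstamp, uint32_t flags, uint32_t ts,
			uint32_t *cmpl_ts)
{
	if (!PTP_RX_TS_VALID(flags) &&
	    (!all_rx_tstamp || !ts || !ALL_RX_TS_VALID(flags)))
		return false;
	*cmpl_ts = ts;
	return true;
}

int main(void)
{
	uint32_t ts_out = 0;

	printf("%d\n", rx_ts_valid(true, 0x2, 1234, &ts_out));  /* 1 */
	printf("%d\n", rx_ts_valid(false, 0x0, 1234, &ts_out)); /* 0 */
	return 0;
}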
+static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+vlan_err:
+ skb_mark_for_recycle(skb);
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op;
+
+ ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1770,13 +2098,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
- u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 flags, misc;
+ u32 cmpl_ts;
void *data;
int rc = 0;
@@ -1804,8 +2136,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
dma_rmb();
prod = rxr->rx_prod;
- if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
- bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnxt_tpa_start(bp, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
@@ -1836,7 +2170,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (rxr->rx_next_cons != 0xffff)
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
if (rc1)
return rc1;
goto next_rx_no_prod_no_len;
@@ -1869,12 +2203,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
}
}
goto next_rx_no_len;
@@ -1884,21 +2218,48 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
+ if (!frag_len)
+ goto oom_next_rx;
+
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ if (len <= bp->rx_copybreak) {
+ if (!xdp_active)
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ else
+ skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -1909,49 +2270,52 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
+ if (!skb)
+ goto oom_next_rx;
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ goto oom_next_rx;
+ }
}
}
if (RX_CMP_HASH_VALID(rxcmp)) {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
- enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+ enum pkt_hash_types type;
- /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnxt_rss_ext_op(bp, rxcmp);
+ } else {
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
+
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
+ type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
+ }
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
- cfa_code = RX_CMP_CFA_CODE(rxcmp1);
- skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
-
- if ((rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __be16 vlan_proto = htons(meta_data >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP)
+ dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+ skb->protocol = eth_type_trans(skb, dev);
- if (eth_type_vlan(vlan_proto)) {
- __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
- } else {
- dev_kfree_skb(skb);
+ if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
goto next_rx;
- }
}
skb_checksum_none_assert(skb);
@@ -1963,22 +2327,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
}
}
- if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
+ if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -1994,12 +2354,17 @@ next_rx:
next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->sw_stats->rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2033,7 +2398,8 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
- if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
@@ -2045,7 +2411,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->sw_stats->rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2091,17 +2457,231 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
return INVALID_HW_RING_ID;
}
-static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
+static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ return link_info->force_link_speed2;
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
+ return link_info->force_pam4_link_speed;
+ return link_info->force_link_speed;
+}
+
+static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->req_link_speed = link_info->force_link_speed2;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ switch (link_info->req_link_speed) {
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ break;
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
+ break;
+ default:
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ }
+ return;
+ }
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ if (link_info->force_pam4_link_speed) {
+ link_info->req_link_speed = link_info->force_pam4_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ }
+}
+
+static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->advertising = link_info->auto_link_speeds2;
+ return;
+ }
+ link_info->advertising = link_info->auto_link_speeds;
+ link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
+}
+
+static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->req_link_speed != link_info->force_link_speed2)
+ return true;
+ return false;
+ }
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+ link_info->req_link_speed != link_info->force_link_speed)
+ return true;
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+ link_info->req_link_speed != link_info->force_pam4_link_speed)
+ return true;
+ return false;
+}
+
+static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->advertising != link_info->auto_link_speeds2)
+ return true;
+ return false;
+ }
+ if (link_info->advertising != link_info->auto_link_speeds ||
+ link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
+ return true;
+ return false;
+}
+
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
+{
+ u32 flags = bp->ctx->ctx_arr[type].flags;
+
+ return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
+ ((flags & BNXT_CTX_MEM_FW_TRACE) ||
+ (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
+}
+
+static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
+{
+ u32 mem_size, pages, rem_bytes, magic_byte_offset;
+ u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
+ struct bnxt_bs_trace_info *bs_trace;
+ int last_pg;
+
+ if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
+ return;
+
+ mem_size = ctxm->max_entries * ctxm->entry_size;
+ rem_bytes = mem_size % BNXT_PAGE_SIZE;
+ pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+
+ last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
+ magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
+
+ rmem = &ctx_pg[0].ring_mem;
+ bs_trace = &bp->bs_trace[trace_type];
+ bs_trace->ctx_type = ctxm->type;
+ bs_trace->trace_type = trace_type;
+ if (pages > MAX_CTX_PAGES) {
+ int last_pg_dir = rmem->nr_pages - 1;
+
+ rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
+ bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
+ } else {
+ bs_trace->magic_byte = rmem->pg_arr[last_pg];
+ }
+ bs_trace->magic_byte += magic_byte_offset;
+ *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
+}
+
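A worked example of the magic-byte placement computed above (the kernel additionally folds last_pg by MAX_CTX_PAGES for multi-level page tables, omitted here); BNXT_PAGE_SIZE is assumed to be 4096 and the entry counts are made up:

/* The magic byte lands in the last used page, just past the last whole
 * entry. */
#include <stdio.h>

#define PAGE_SZ 4096u

int main(void)
{
	unsigned int max_entries = 1000, entry_size = 64;
	unsigned int mem_size  = max_entries * entry_size;	/* 64000 */
	unsigned int rem_bytes = mem_size % PAGE_SZ;		/* 2560  */
	unsigned int pages     = (mem_size + PAGE_SZ - 1) / PAGE_SZ; /* 16 */
	unsigned int last_pg   = pages - 1;			/* index 15 */
	unsigned int magic_off = (rem_bytes ? rem_bytes : PAGE_SZ) - 1;

	printf("page %u, offset %u\n", last_pg, magic_off);	/* 15, 2559 */
	return 0;
}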
+#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
+
+#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
+
+#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
+
+#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
+ ((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
+
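These macros all follow the same mask-and-shift pattern for extracting a bitfield from an event word. A generic sketch with hypothetical mask and shift values (the real ones come from the generated HWRM headers):

/* Generic shape of the decode macros above; FIELD_MASK/FIELD_SFT are
 * made-up values for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK 0x0000ff00u
#define FIELD_SFT  8

#define GET_FIELD(data) (((data) & FIELD_MASK) >> FIELD_SFT)

int main(void)
{
	uint32_t data2 = 0x00002a00;	/* field value 0x2a = 42 */

	printf("decoded: %u\n", (unsigned)GET_FIELD(data2));
	return 0;
}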
+/* Return true if the workqueue has to be scheduled */
+static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
- switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ netdev_warn(bp->dev, "Pause Storm detected!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
+ u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
+ char *threshold_type;
+ bool notify = false;
+ char *dir_str;
+
+ switch (type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ threshold_type = "warning";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ threshold_type = "critical";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ threshold_type = "fatal";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ threshold_type = "shutdown";
+ break;
+ default:
+ netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
+ return false;
+ }
+ if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
+ dir_str = "above";
+ notify = true;
+ } else {
+ dir_str = "below";
+ }
+ netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
+ dir_str, threshold_type);
+ netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
+ BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
+ BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
+ if (notify) {
+ bp->thermal_threshold_type = type;
+ set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
+ return true;
+ }
+ return false;
+ }
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
+ break;
default:
- netdev_err(bp->dev, "FW reported unknown error type\n");
+ netdev_err(bp->dev, "FW reported unknown error type %u\n",
+ err_type);
break;
}
+ return false;
}
#define BNXT_GET_EVENT_PORT(data) \
@@ -2116,6 +2696,16 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
(BNXT_EVENT_RING_TYPE(data2) == \
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
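The PHC handler below rebuilds a 64-bit nanosecond value from the event's high-order bits and the driver's free-running lower 48 bits. A sketch with illustrative values:

/* Reconstructing the 64-bit PHC time: the async event supplies the bits
 * above bit 47, ptp->current_time supplies the lower 48. */
#include <stdint.h>
#include <stdio.h>

#define PHC_BITS 48

int main(void)
{
	uint64_t msbs    = 0x3;			  /* from the async event */
	uint64_t lower48 = 0x0000123456789abcULL; /* ptp->current_time */
	uint64_t ns      = (msbs << PHC_BITS) | lower48;

	printf("phc time: 0x%016llx\n", (unsigned long long)ns);
	return 0;
}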
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -2123,6 +2713,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
u32 data1 = le32_to_cpu(cmpl->event_data1);
u32 data2 = le32_to_cpu(cmpl->event_data2);
+ netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+ event_id, data1, data2);
+
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
@@ -2134,7 +2727,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* print unsupported speed warning in forced speed mode only */
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
(data1 & 0x20000)) {
- u16 fw_speed = link_info->force_link_speed;
+ u16 fw_speed = bnxt_get_force_speed(link_info);
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
if (speed != SPEED_UNKNOWN)
@@ -2172,7 +2765,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- char *fatal_str = "non-fatal";
+ char *type_str = "Solicited";
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2184,13 +2777,21 @@ static int bnxt_async_event_process(struct bnxt *bp,
bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
- if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- fatal_str = "fatal";
+ if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
+ set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
+ } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ type_str = "Fatal";
+ bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
+ EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
+ type_str = "Non-fatal";
+ bp->fw_health->survivals++;
+ set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
}
netif_warn(bp, hw, bp->dev,
- "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
- fatal_str, data1, data2,
+ "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
@@ -2198,17 +2799,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
+ char *status_desc = "healthy";
+ u32 status;
if (!fw_health)
goto async_event_process_exit;
if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
fw_health->enabled = false;
- netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[0]\n");
+ netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
break;
}
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2218,10 +2820,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (status != BNXT_FW_STATUS_HEALTHY)
+ status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
- fw_health->master, fw_health->last_fw_reset_cnt,
- bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
+ fw_health->primary ? "primary" : "backup", status,
+ status_desc, fw_health->last_fw_reset_cnt);
if (!fw_health->enabled) {
/* Make sure tmr_counter is set and visible to
* bnxt_health_check() before setting enabled to true.
@@ -2240,7 +2845,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto async_event_process_exit;
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
@@ -2255,7 +2860,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
}
rxr = bp->bnapi[grp_idx]->rx_ring;
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
@@ -2277,7 +2882,30 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
- bnxt_event_error_report(bp, data1, data2);
+ if (bnxt_event_error_report(bp, data1, data2))
+ break;
+ goto async_event_process_exit;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (BNXT_PTP_USE_RTC(bp)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+ u64 ns;
+
+ if (!ptp)
+ goto async_event_process_exit;
+
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
+ break;
+ }
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
@@ -2286,10 +2914,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
- bnxt_queue_sp_work(bp);
+ __bnxt_queue_sp_work(bp);
async_event_process_exit:
bnxt_ulp_async_events(bp, cmpl);
return 0;
@@ -2319,8 +2954,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
}
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
- set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -2335,6 +2969,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -2359,41 +3000,13 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
-static irqreturn_t bnxt_inta(int irq, void *dev_instance)
-{
- struct bnxt_napi *bnapi = dev_instance;
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 cons = RING_CMP(cpr->cp_raw_cons);
- u32 int_status;
-
- prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
-
- if (!bnxt_has_work(bp, cpr)) {
- int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
- /* return if erroneous interrupt */
- if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
- return IRQ_NONE;
- }
-
- /* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
-
- /* Return here if interrupt is shared and is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- napi_schedule(&bnapi->napi);
- return IRQ_HANDLED;
-}
-
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
- int tx_pkts = 0;
int rx_pkts = 0;
u8 event = 0;
struct tx_cmp *txcmp;
@@ -2401,6 +3014,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 0;
cpr->had_work_done = 1;
while (1) {
+ u8 cmp_type;
int rc;
cons = RING_CMP(raw_cons);
@@ -2413,22 +3027,40 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
- tx_pkts++;
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNXT_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
+ tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
+ bp->tx_ring_mask;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
+ if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
cpr->has_more_work = 1;
break;
}
- } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+ bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -2440,12 +3072,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
- } else if (unlikely((TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_DONE) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
bnxt_hwrm_handler(bp, txcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -2456,40 +3085,45 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT)
- xdp_do_flush_map();
+ if (flush_xdp) {
+ xdp_do_flush();
+ event &= ~BNXT_REDIRECT_EVENT;
+ }
if (event & BNXT_TX_EVENT) {
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+ event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
- bnapi->tx_pkts += tx_pkts;
bnapi->events |= event;
return rx_pkts;
}
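The tx_freed computation above depends on masked modular arithmetic, which stays correct when the 16-bit hardware consumer index wraps past zero. A worked example with a hypothetical ring size:

/* Masked ring-index subtraction survives 16-bit wraparound. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ring_mask  = 511;	/* 512-entry ring */
	uint16_t tx_cons    = 0xfff0;	/* driver consumer, about to wrap */
	uint16_t tx_hw_cons = 0x0010;	/* hardware consumer, wrapped */
	uint16_t tx_freed = (tx_hw_cons - tx_cons) & ring_mask;

	printf("freed %u descriptors\n", tx_freed);	/* 32 */
	return 0;
}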
-static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+ int budget)
{
- if (bnapi->tx_pkts) {
- bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
- bnapi->tx_pkts = 0;
- }
+ if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
+ bnapi->tx_int(bp, bnapi, budget);
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNXT_RX_EVENT;
+ }
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNXT_AGG_EVENT;
}
- bnapi->events = 0;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -2506,7 +3140,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
*/
bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
- __bnxt_poll_work_done(bp, bnapi);
+ __bnxt_poll_work_done(bp, bnapi, budget);
return rx_pkts;
}
@@ -2520,6 +3154,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
struct rx_cmp_ext *rxcmp1;
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 rx_pkts = 0;
u8 event = 0;
@@ -2554,6 +3189,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
} else if (unlikely(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE)) {
bnxt_hwrm_handler(bp, txcmp);
@@ -2573,6 +3210,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (event & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ if (flush_xdp)
+ xdp_do_flush();
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
@@ -2607,14 +3246,14 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -2624,10 +3263,10 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i, work_done = 0;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
- if (cpr2) {
+ if (cpr2->had_nqe_notify) {
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2637,29 +3276,38 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
- u64 dbr_type)
+ u64 dbr_type, int budget)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && cpr2->had_work_done) {
+ if (cpr2->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr2->had_nqe_notify = 0;
+ tgl = cpr2->toggle;
+ }
db = &cpr2->cp_db;
- bnxt_writeq(bp, db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr2->cp_raw_cons),
+ db->doorbell);
cpr2->had_work_done = 0;
}
}
- __bnxt_poll_work_done(bp, bnapi);
+ __bnxt_poll_work_done(bp, bnapi, budget);
}
static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_cp_ring_info *cpr_rx;
u32 raw_cons = cpr->cp_raw_cons;
struct bnxt *bp = bnapi->bp;
struct nqe_cn *nqcmp;
@@ -2675,6 +3323,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
}
while (1) {
+ u16 type;
+
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
@@ -2682,12 +3332,13 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (cpr->has_more_work)
break;
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+ budget);
cpr->cp_raw_cons = raw_cons;
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
cpr->cp_raw_cons);
- return work_done;
+ goto poll_done;
}
/* The valid test of the entry must be done first before
@@ -2695,11 +3346,21 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
*/
dma_rmb();
- if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
struct bnxt_cp_ring_info *cpr2;
- cpr2 = cpr->cp_ring_arr[idx];
+ /* No more budget for RX work */
+ if (budget && work_done >= budget &&
+ cq_type == BNXT_NQ_HDL_TYPE_RX)
+ break;
+
+ idx = BNXT_NQ_HDL_IDX(idx);
+ cpr2 = &cpr->cp_ring_arr[idx];
+ cpr2->had_nqe_notify = 1;
+ cpr2->toggle = NQE_CN_TOGGLE(type);
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2708,166 +3369,190 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
}
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
if (raw_cons != cpr->cp_raw_cons) {
cpr->cp_raw_cons = raw_cons;
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
+poll_done:
+ cpr_rx = &cpr->cp_ring_arr[0];
+ if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
+ struct dim_sample dim_sample = {};
+
+ dim_update_sample(cpr->event_ctr,
+ cpr_rx->rx_packets,
+ cpr_rx->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
+ }
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_tx_skbs(struct bnxt *bp)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
- int i, max_idx, max_agg_idx;
+ int i;
- max_idx = bp->rx_nr_pages * RX_DESC_CNT;
- max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
+ if (!bp->tx_ring)
+ return;
- for (i = 0; i < bp->max_tpa; i++) {
- struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
- u8 *data = tpa_info->data;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- if (!data)
+ if (!txr->tx_buf_ring)
continue;
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
+ }
- tpa_info->data = NULL;
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
+}
- kfree(data);
- }
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
-skip_rx_tpa_free:
- if (!rxr->rx_buf_ring)
- goto skip_rx_buf_free;
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
- dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp)) {
- mapping -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ if (BNXT_RX_PAGE_MODE(bp))
page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
+ else
+ page_pool_free_va(rxr->head_pool, data, true);
}
+}
-skip_rx_buf_free:
- if (!rxr->rx_agg_ring)
- goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < max_agg_idx; i++) {
+ for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
-
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
+}
-skip_rx_agg_free:
- if (rxr->rx_page) {
- __free_page(rxr->rx_page);
- rxr->rx_page = NULL;
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ tpa_info->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, false);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
+
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
+ bnxt_free_one_rx_agg_ring(bp, rxr);
+
+skip_rx_agg_free:
map = rxr->rx_tpa_idx_map;
if (map)
memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
@@ -2881,7 +3566,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -2890,23 +3575,52 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
- u8 init_val = mem_init->init_val;
- u16 offset = mem_init->offset;
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
- if (offset == BNXT_MEM_INVALID_OFFSET) {
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
- for (i = 0; i < len; i += mem_init->size)
+ for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val;
}
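The strided initialization above writes one byte at a fixed offset inside every entry_size-sized record. A minimal model with illustrative sizes (the bounds check here is slightly more defensive than the kernel loop):

/* One init byte per entry_size-sized record at a fixed intra-entry
 * offset. */
#include <stdio.h>
#include <string.h>

static void init_ctx_mem(unsigned char *p, int len, int entry_size,
			 int offset, unsigned char init_val)
{
	int i;

	for (i = 0; i + offset < len; i += entry_size)
		p[i + offset] = init_val;
}

int main(void)
{
	unsigned char page[64];

	memset(page, 0, sizeof(page));
	init_ctx_mem(page, sizeof(page), 16, 3, 0xff);	/* 4 entries */
	printf("page[3]=0x%02x page[19]=0x%02x\n", page[3], page[19]);
	return 0;
}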
+static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ int i, head_page, start_idx, source_offset;
+ size_t len, rem_len, total_len, max_bytes;
+
+ head_page = head / rmem->page_size;
+ source_offset = head % rmem->page_size;
+ total_len = (tail - head) & MAX_CTX_BYTES_MASK;
+ if (!total_len)
+ total_len = MAX_CTX_BYTES;
+ start_idx = head_page % MAX_CTX_PAGES;
+ max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
+ source_offset;
+ total_len = min(total_len, max_bytes);
+ rem_len = total_len;
+
+ for (i = start_idx; rem_len; i++, source_offset = 0) {
+ len = min((size_t)(rmem->page_size - source_offset), rem_len);
+ if (buf)
+ memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
+ len);
+ offset += len;
+ rem_len -= len;
+ }
+ return total_len;
+}
+
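The head/tail arithmetic above treats the trace buffer as circular: the distance is masked to the buffer size, with zero meaning the buffer is completely full. A worked example with a hypothetical 64 KiB buffer:

/* Circular head/tail distance with "0 means full" semantics. */
#include <stddef.h>
#include <stdio.h>

#define BYTES_MASK 0xffffu	/* 64 KiB circular buffer, hypothetical */
#define BYTES_MAX  (BYTES_MASK + 1)

int main(void)
{
	size_t head = 0xff00, tail = 0x0100;	/* producer wrapped */
	size_t total_len = (tail - head) & BYTES_MASK;

	if (!total_len)
		total_len = BYTES_MAX;
	printf("copy %zu bytes\n", total_len);	/* 512 */
	return 0;
}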
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -2970,8 +3684,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->mem_init)
- bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ if (rmem->ctx_mem)
+ bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -2993,6 +3707,23 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
int i;
@@ -3000,50 +3731,54 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- kfree(rxr->rx_tpa[0].agg_arr);
- rxr->rx_tpa[0].agg_arr = NULL;
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
+ }
+}
+
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
}
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
}
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j, total_aggs = 0;
+ int i, rc;
bp->max_tpa = MAX_TPA;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
-
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
- continue;
- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
- rxr->rx_tpa[0].agg_arr = agg;
- if (!agg)
- return -ENOMEM;
- for (j = 1; j < bp->max_tpa; j++)
- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3067,7 +3802,8 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- rxr->page_pool = NULL;
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -3081,28 +3817,74 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr)
+ struct bnxt_rx_ring_info *rxr,
+ int numa_node)
{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
struct page_pool_params pp = { 0 };
+ struct page_pool *pool;
- pp.pool_size = bp->rx_ring_size;
- pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
+ if (BNXT_RX_PAGE_MODE(bp))
+ pp.pool_size += bp->rx_ring_size / rx_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
- pp.dma_dir = DMA_BIDIRECTIONAL;
+ pp.dma_dir = bp->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
+ pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
- rxr->page_pool = page_pool_create(&pp);
- if (IS_ERR(rxr->page_pool)) {
- int err = PTR_ERR(rxr->page_pool);
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
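
bnxt_alloc_rx_page_pool() now creates the payload pool first and only creates a distinct head pool when the payload pool can hand out unreadable netmem; otherwise it takes an extra reference on the same pool so that rxr->page_pool and rxr->head_pool can both be destroyed unconditionally in the free path shown earlier. A runnable userspace sketch of that shared-or-separate ownership pattern (the refcounting here is illustrative, not the page_pool API):

#include <stdlib.h>

struct pool { int refcnt; };

static struct pool *pool_create(void)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (p)
		p->refcnt = 1;
	return p;
}

static void pool_get(struct pool *p) { p->refcnt++; }

static void pool_put(struct pool *p)
{
	if (!--p->refcnt)
		free(p);
}

struct ring { struct pool *data_pool, *head_pool; };

static int ring_setup(struct ring *r, int need_separate_head)
{
	r->data_pool = pool_create();
	if (!r->data_pool)
		return -1;
	if (need_separate_head) {
		r->head_pool = pool_create();
		if (!r->head_pool) {
			pool_put(r->data_pool);
			return -1;
		}
	} else {
		pool_get(r->data_pool);	/* second ref on the same pool */
		r->head_pool = r->data_pool;
	}
	return 0;
}

static void ring_teardown(struct ring *r)
{
	pool_put(r->data_pool);	/* safe in both configurations */
	pool_put(r->head_pool);
}

The extra reference keeps the free path branch-free: teardown never has to remember which configuration was chosen.
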
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
- rxr->page_pool = NULL;
- return err;
- }
return 0;
}
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc = 0, agg_rings = 0;
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3113,12 +3895,18 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -3138,19 +3926,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -3182,6 +3966,15 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
}
}
+#define BNXT_TC_TO_RING_BASE(bp, tc) \
+ ((tc) * (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC_OFF(bp, tx) \
+ ((tx) % (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC(bp, tx) \
+ ((tx) / (bp)->tx_nr_rings_per_tc)
+
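
The three macros above encode a fixed blocking of TX rings by traffic class: each TC owns tx_nr_rings_per_tc consecutive rings. A worked example, assuming four rings per TC (an illustrative value):

#include <assert.h>

#define RINGS_PER_TC 4	/* stands in for bp->tx_nr_rings_per_tc */

int main(void)
{
	int tx = 9;

	assert(tx / RINGS_PER_TC == 2);		/* BNXT_RING_TO_TC:     TC 2     */
	assert(tx % RINGS_PER_TC == 1);		/* BNXT_RING_TO_TC_OFF: offset 1 */
	assert(2 * RINGS_PER_TC == 8);		/* BNXT_TC_TO_RING_BASE(bp, 2)   */
	return 0;
}
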
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
int i, j, rc;
@@ -3234,9 +4027,10 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
+ spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
- if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
j++;
}
return 0;
@@ -3319,36 +4113,33 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
bnxt_free_ring(bp, &ring->ring_mem);
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (!cpr->cp_ring_arr)
+ continue;
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- bnxt_free_ring(bp, &ring->ring_mem);
- bnxt_free_cp_arrays(cpr2);
- kfree(cpr2);
- cpr->cp_ring_arr[j] = NULL;
- }
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
}
+ kfree(cpr->cp_ring_arr);
+ cpr->cp_ring_arr = NULL;
+ cpr->cp_ring_count = 0;
}
}
-static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
- struct bnxt_cp_ring_info *cpr;
int rc;
- cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
- if (!cpr)
- return NULL;
-
rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
if (rc) {
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- return NULL;
+ return -ENOMEM;
}
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
@@ -3361,23 +4152,25 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (rc) {
bnxt_free_ring(bp, rmem);
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- cpr = NULL;
}
- return cpr;
+ return rc;
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_msix;
+ int tcs = bp->num_tc;
+ if (!tcs)
+ tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
- ulp_base_vec = bnxt_get_ulp_msix_base(bp);
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
struct bnxt_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
if (!bnapi)
continue;
@@ -3390,41 +4183,118 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
- if (ulp_msix && i >= ulp_base_vec)
- ring->map_idx = i + ulp_msix;
- else
- ring->map_idx = i;
+ ring->map_idx = ulp_msix + i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
if (i < bp->rx_nr_rings) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
-
- cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
- cpr2->bnapi = bnapi;
+ cp_count++;
+ rx = 1;
+ }
+ if (i < bp->tx_nr_rings_xdp) {
+ cp_count++;
+ tx = 1;
+ } else if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
}
- if ((sh && i < bp->tx_nr_rings) ||
- (!sh && i >= bp->rx_nr_rings)) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
- cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
+ cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!cpr->cp_ring_arr)
+ return -ENOMEM;
+ cpr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr2 = &cpr->cp_ring_arr[k];
+ rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
+ if (rc)
+ return rc;
cpr2->bnapi = bnapi;
+ cpr2->sw_stats = cpr->sw_stats;
+ cpr2->cp_idx = k;
+ if (!k && rx) {
+ bp->rx_ring[i].rx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
+ bp->tx_ring[n].tx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
+ }
}
+ if (tx)
+ j++;
}
return 0;
}
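
On P5+ chips each NQ now carries a kcalloc()'d array of completion sub-rings instead of the old fixed two-slot cp_ring_arr: one entry if the index fronts an RX ring, plus either one entry for an XDP TX ring or tcs entries (one per traffic class) for regular TX. A worked count, assuming shared rings and two traffic classes:

static int cp_count_example(void)
{
	int tcs = 2;	/* illustrative number of traffic classes */
	int rx = 1;	/* this NQ index fronts an RX ring */

	/* an XDP TX ring would add 1 instead of tcs; the cases are exclusive */
	return rx + tcs;	/* 3 completion sub-rings for this NQ */
}
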
-static void bnxt_init_ring_struct(struct bnxt *bp)
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
int i;
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
+ memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+ int i, j;
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_ring_mem_info *rmem;
@@ -3468,18 +4338,16 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
- txr = bnapi->tx_ring;
- if (!txr)
- continue;
-
- ring = &txr->tx_ring_struct;
- rmem = &ring->ring_mem;
- rmem->nr_pages = bp->tx_nr_pages;
- rmem->page_size = HW_RXBD_RING_SIZE;
- rmem->pg_arr = (void **)txr->tx_desc_ring;
- rmem->dma_arr = txr->tx_desc_mapping;
- rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- rmem->vmem = (void **)&txr->tx_buf_ring;
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
}
}
@@ -3505,58 +4373,88 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct net_device *dev = bp->dev;
u32 prod;
int i;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
+}
- if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
- return 0;
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod;
+ int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
- ring_nr, i, bp->rx_ring_size);
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
+ ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+}
- if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
+
return 0;
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ int rc;
+
+ bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
+
+ if (rxr->rx_tpa) {
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 type;
@@ -3566,25 +4464,50 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
- rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring;
+ u32 type;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
-
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+ RX_BD_TYPE_RX_AGG_BD;
+
+ /* On P7, setting EOP will cause the chip to disable
+ * Relaxed Ordering (RO) for TPA data. Disable EOP for
+ * potentially higher performance with RO.
+ */
+ if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
+ type |= RX_BD_FLAGS_AGG_EOP;
bnxt_init_rxbd_pages(ring, type);
}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[ring_nr];
+ bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+
+ bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr);
}
@@ -3600,11 +4523,10 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
-
- if (!cpr2)
- continue;
+ if (!cpr->cp_ring_arr)
+ continue;
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
ring = &cpr2->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -3647,6 +4569,11 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (i >= bp->tx_nr_rings_xdp)
+ netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
+ NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
}
return 0;
@@ -3692,8 +4619,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -3710,6 +4641,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -3717,19 +4649,42 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
- else
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ if (i == BNXT_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
+ bp->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bp->toeplitz_prefix <<= 8;
+ bp->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
+ }
}
}
}
@@ -3762,6 +4717,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
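
The default HDS threshold above is sized so that four head buffers, each with its per-buffer overheads, fit in one 4 KiB page. With illustrative values of NET_SKB_PAD = 64 and a 320-byte aligned struct skb_shared_info (both are config- and arch-dependent assumptions), the arithmetic is:

enum {
	SZ_1K_EX	= 1024,	/* SZ_1K */
	NET_SKB_PAD_EX	= 64,	/* assumed NET_SKB_PAD */
	SHINFO_EX	= 320,	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
};

static inline int dflt_hds_thresh(void)
{
	return SZ_1K_EX - NET_SKB_PAD_EX - SHINFO_EX;	/* 640 */
}

The max() against BNXT_DEFAULT_RX_COPYBREAK keeps the threshold at least as large as the default copybreak on configurations where the computed value would come out smaller.
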
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -3773,15 +4739,14 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -3814,9 +4779,20 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = PAGE_SIZE;
+ rx_size = PAGE_SIZE -
+ ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ } else {
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3854,26 +4830,51 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
+ struct net_device *dev = bp->dev;
+
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
- bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->xdp_prog->aux->xdp_has_frags)
+ dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ else
+ dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
- /* Disable LRO or GRO_HW */
- netdev_update_features(bp->dev);
} else {
- bp->dev->max_mtu = bp->max_mtu;
+ dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
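
__bnxt_set_rx_skb_mode() boils down to a three-way choice of receive handler: page mode with an MTU above BNXT_MAX_PAGE_MODE_MTU keeps aggregation rings and uses multi-page skbs, page mode otherwise uses single-page skbs with aggregation off, and non-page mode restores the normal skb path. A compact restatement with illustrative names:

enum rx_mode { RX_SKB, RX_PAGE_SKB, RX_MULTI_PAGE_SKB };

static enum rx_mode pick_rx_skb_func(int page_mode, int mtu, int page_mode_mtu)
{
	if (!page_mode)
		return RX_SKB;			/* bnxt_rx_skb, DMA_FROM_DEVICE */
	if (mtu > page_mode_mtu)
		return RX_MULTI_PAGE_SKB;	/* jumbo: agg rings stay on */
	return RX_PAGE_SKB;			/* single page, no agg rings */
}
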
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -3947,7 +4948,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_skip_grps;
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -3961,13 +4962,13 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
vnic_skip_grps:
- if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
/* Allocate rss table and hash key */
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
@@ -4077,7 +5078,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
- !(bp->flags & BNXT_FLAG_CHIP_P5))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
@@ -4115,7 +5116,7 @@ static void bnxt_init_stats(struct bnxt *bp)
stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) {
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
mask = (1ULL << 48) - 1;
else
mask = -1ULL;
@@ -4193,6 +5194,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
bnxt_free_stats_mem(bp, &cpr->stats);
+
+ kfree(cpr->sw_stats);
+ cpr->sw_stats = NULL;
}
}
@@ -4207,6 +5211,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+ if (!cpr->sw_stats)
+ return -ENOMEM;
+
cpr->stats.len = size;
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
if (rc)
@@ -4261,7 +5269,7 @@ alloc_tx_ext_stats:
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
- int i;
+ int i, j;
if (!bp->bnapi)
return;
@@ -4278,10 +5286,10 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
cpr = &bnapi->cp_ring;
cpr->cp_raw_cons = 0;
- txr = bnapi->tx_ring;
- if (txr) {
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
txr->tx_prod = 0;
txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
}
rxr = bnapi->rx_ring;
@@ -4291,16 +5299,56 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
rxr->rx_sw_agg_prod = 0;
rxr->rx_next_cons = 0;
}
+ bnapi->events = 0;
}
}
-static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
-#ifdef CONFIG_RFS_ACCEL
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked_or_invisible(bp->dev);
+
+ /* We hold the netdev instance lock and all our NAPIs have been
+ * disabled. It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -4308,42 +5356,67 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
struct bnxt_ntuple_filter *fltr;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
- hlist_del(&fltr->hash);
- kfree(fltr);
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
+ continue;
+ bnxt_del_fltr(bp, &fltr->base);
}
}
- if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
- bp->ntp_fltr_bmap = NULL;
- }
+ if (!all)
+ return;
+
+ bitmap_free(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
bp->ntp_fltr_count = 0;
-#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (!(bp->flags & BNXT_FLAG_RFS))
+ if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
return 0;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
return rc;
-#else
- return 0;
-#endif
+}
+
+static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_l2_filter *fltr;
+
+ head = &bp->l2_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
+ continue;
+ bnxt_del_fltr(bp, &fltr->base);
+ }
+ }
+}
+
+static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
@@ -4353,7 +5426,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
bnxt_free_all_cp_arrays(bp);
- bnxt_free_ntp_fltrs(bp, irq_re_init);
+ bnxt_free_ntp_fltrs(bp, false);
+ bnxt_free_l2_filters(bp, false);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
@@ -4396,7 +5470,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr =
&bp->bnapi[i]->cp_ring;
@@ -4414,11 +5488,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
rxr->rx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
rxr->rx_agg_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
+ } else {
+ rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
}
rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
@@ -4441,22 +5517,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
else
j = bp->rx_nr_rings;
- for (i = 0; i < bp->tx_nr_rings; i++, j++) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_napi *bnapi2;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
- txr->bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
+ int k = j + BNXT_RING_TO_TC_OFF(bp, i);
+
+ bnapi2 = bp->bnapi[k];
txr->txq_index = i - bp->tx_nr_rings_xdp;
- bp->bnapi[j]->tx_int = bnxt_tx_int;
+ txr->tx_napi_idx =
+ BNXT_RING_TO_TC(bp, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ bnapi2->tx_int = bnxt_tx_int;
} else {
- bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
- bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ bnapi2 = bp->bnapi[j];
+ bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
+ bnapi2->tx_ring[0] = txr;
+ bnapi2->tx_int = bnxt_tx_int_xdp;
+ j++;
}
+ txr->bnapi = bnapi2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ txr->tx_cpr = &bnapi2->cp_ring;
}
rc = bnxt_alloc_stats(bp);
@@ -4491,8 +5578,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -4584,6 +5676,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -4601,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
+ if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
+ cmd == HWRM_PORT_PHY_QCFG)
+ continue;
+
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
@@ -4624,6 +5722,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
continue;
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
+ !bp->ptp_cfg)
+ continue;
__set_bit(bnxt_async_events_arr[i], async_events_bmap);
}
if (bmap && bmap_size) {
@@ -4651,7 +5752,7 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
return rc;
}
-static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input *req;
int rc;
@@ -4665,6 +5766,8 @@ static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
+
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
struct hwrm_tunnel_dst_port_free_input *req;
@@ -4694,6 +5797,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
+ bp->vxlan_gpe_port = 0;
+ bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
+ break;
default:
break;
}
@@ -4702,6 +5810,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
if (rc)
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
rc);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
return rc;
}
@@ -4737,9 +5847,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ bp->vxlan_gpe_port = port;
+ bp->vxlan_gpe_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
+ break;
default:
break;
}
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
err_out:
hwrm_req_drop(bp, req);
@@ -4757,31 +5874,346 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ if (!atomic_dec_and_test(&fltr->refcnt))
+ return;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ kfree_rcu(fltr, base.rcu);
+}
+
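
bnxt_del_l2_filter() frees a filter only on the last reference and makes the unlink idempotent: atomic_dec_and_test() gates the slow path, BNXT_FLTR_INSERTED is test-and-cleared under ntp_fltr_lock so two racing callers cannot both unlink, and kfree_rcu() defers the free past concurrent RCU readers still walking the hash chain. A compilable userspace analogue of the shape of that pattern (the spinlock and RCU are modelled by comments only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct filter {
	atomic_int refcnt;
	atomic_bool inserted;
};

static void filter_put(struct filter *f)
{
	if (atomic_fetch_sub(&f->refcnt, 1) != 1)
		return;				/* not the last reference */
	/* spin_lock_bh(&ntp_fltr_lock); */
	if (!atomic_exchange(&f->inserted, false))
		return;				/* already unlinked by a racer */
	/* hlist_del_rcu(); spin_unlock_bh(); kfree_rcu() defers this: */
	free(f);
}
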
+static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
+ struct bnxt_l2_filter *fltr;
+
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnxt_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct bnxt_l2_filter *fltr = NULL;
+
+ rcu_read_lock();
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ atomic_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+#define BNXT_IPV4_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
+
+#define BNXT_IPV6_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
+
+static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
+{
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (BNXT_IPV4_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v4addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+ return sizeof(fkeys->addrs.v4addrs);
+ }
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (BNXT_IPV6_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v6addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+ return sizeof(fkeys->addrs.v6addrs);
+ }
+
+ return 0;
+}
+
+static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
+ const unsigned char *key)
+{
+ u64 prefix = bp->toeplitz_prefix, hash = 0;
+ struct bnxt_ipv4_tuple tuple4;
+ struct bnxt_ipv6_tuple tuple6;
+ int i, j, len = 0;
+ u8 *four_tuple;
+
+ len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
+ if (!len)
+ return 0;
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuple4.v4addrs = fkeys->addrs.v4addrs;
+ tuple4.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple4;
+ } else {
+ tuple6.v6addrs = fkeys->addrs.v6addrs;
+ tuple6.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple6;
+ }
+
+ for (i = 0, j = 8; i < len; i++, j++) {
+ u8 byte = four_tuple[i];
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ }
+ prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
+}
+
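
bnxt_toeplitz() is a bit-serial Toeplitz hash: prefix holds a 64-bit window of key bits aligned to the current input bit, the window slides one bit per input bit while refilling a byte at a time from key[8] onward (bytes 0-7 were preloaded into bp->toeplitz_prefix during VNIC init), and the window is XORed into the accumulator whenever the input bit is set. A standalone, runnable equivalent of that loop over an arbitrary byte string:

#include <stdint.h>
#include <stddef.h>

static uint32_t toeplitz(const uint8_t *key, size_t key_len,
			 const uint8_t *in, size_t in_len)
{
	uint64_t prefix = 0, hash = 0;
	size_t i, j;

	for (j = 0; j < 8; j++)			/* assumes key_len >= 8 */
		prefix = (prefix << 8) | key[j];	/* bp->toeplitz_prefix */

	for (i = 0; i < in_len; i++, j++) {
		uint8_t byte = in[i];
		int bit;

		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
			if (byte & 0x80)
				hash ^= prefix;
		}
		prefix |= (j < key_len) ? key[j] : 0;
	}
	/* the driver additionally masks this with BNXT_NTP_FLTR_HASH_MASK */
	return hash >> 32;
}

Only the upper 32 bits of the accumulator are valid; the lower half of the window acts as a buffer so that refilling key bits at byte granularity does not disturb them.
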
#ifdef CONFIG_RFS_ACCEL
-static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+static struct bnxt_l2_filter *
+bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ return fltr;
+}
+#endif
+
+static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
+ struct bnxt_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNXT_FLTR_TYPE_L2;
+ if (fltr->base.flags) {
+ int bit_id;
+
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ bp->max_fltr, 0);
+ if (bit_id < 0)
+ return -ENOMEM;
+ fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
+ }
+ head = &bp->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ atomic_set(&fltr->refcnt, 1);
+ return 0;
+}
+
+static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ if (rc) {
+ bnxt_del_l2_filter(bp, fltr);
+ fltr = ERR_PTR(rc);
+ }
+ return fltr;
+}
+
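
bnxt_alloc_l2_filter() does an RCU lookup first, bumping the refcount on a hit, and only allocates on a miss; the bnxt_alloc_new_l2_filter() variant below repeats the lookup under ntp_fltr_lock so a racing insertion is reported as -EEXIST instead of creating a duplicate. Bucket selection is jhash2() over the key words, masked down to the power-of-two table size; illustratively:

#include <stdint.h>

#define L2_TBL_SIZE	32		/* illustrative power of two */
#define L2_TBL_MASK	(L2_TBL_SIZE - 1)

/* mirrors: jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, seed) & mask */
static unsigned int l2_bucket(uint32_t key_hash)
{
	return key_hash & L2_TBL_MASK;
}
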
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+ return vf->fw_fid;
+#else
+ return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ if (target_id == INVALID_HW_RING_ID)
+ return -EINVAL;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->l2_filter_id = fltr->base.filter_id;
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ }
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req->flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ fltr->base.filter_id = resp->l2_filter_id;
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_free_input *req;
int rc;
+ set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
if (rc)
return rc;
- req->ntuple_filter_id = fltr->filter_id;
+ req->ntuple_filter_id = fltr->base.filter_id;
return hwrm_req_send(bp, req);
}
#define BNXT_NTP_FLTR_FLAGS \
(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
- CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
@@ -4797,56 +6229,100 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
#define BNXT_NTP_TUNNEL_FLTR_FLAG \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
-static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+void bnxt_fill_ipv6_mask(__be32 mask[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mask[i] = cpu_to_be32(~0);
+}
+
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ struct bnxt_ntuple_filter *fltr)
+{
+ u16 rxq = fltr->base.rxq;
+
+ if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ struct ethtool_rxfh_context *ctx;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+ fltr->base.fw_vnic_id);
+ if (ctx) {
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ }
+ return;
+ }
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
if (rc)
return rc;
- req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+ l2_fltr = fltr->l2_fltr;
+ req->l2_filter_id = l2_fltr->base.filter_id;
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
- vnic = &bp->vnic_info[fltr->rxq + 1];
+ vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
- memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req->ip_protocol = keys->basic.ip_proto;
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
- int i;
-
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -4855,84 +6331,85 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
}
req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
+ req->src_port_mask = masks->ports.src;
req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- fltr->filter_id = resp->ntuple_filter_id;
+ fltr->base.filter_id = resp->ntuple_filter_id;
hwrm_req_drop(bp, req);
return rc;
}
-#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
const u8 *mac_addr)
{
- struct hwrm_cfa_l2_filter_alloc_output *resp;
- struct hwrm_cfa_l2_filter_alloc_input *req;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
- if (rc)
- return rc;
-
- req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
- req->flags |=
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
- req->enables =
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
- memcpy(req->l2_addr, mac_addr, ETH_ALEN);
- req->l2_addr_mask[0] = 0xff;
- req->l2_addr_mask[1] = 0xff;
- req->l2_addr_mask[2] = 0xff;
- req->l2_addr_mask[3] = 0xff;
- req->l2_addr_mask[4] = 0xff;
- req->l2_addr_mask[5] = 0xff;
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
- resp->l2_filter_id;
- hwrm_req_drop(bp, req);
+ fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
return rc;
}
-static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
- struct hwrm_cfa_l2_filter_free_input *req;
u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
- int rc;
/* Any associated ntuple filters will also be cleared by firmware. */
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 0; i < num_of_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
for (j = 0; j < vnic->uc_filter_count; j++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[j];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
vnic->uc_filter_count = 0;
}
- hwrm_req_drop(bp, req);
- return rc;
}
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+#define BNXT_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bp->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bp->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bp->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
+}
+
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -4976,7 +6453,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
segs = MAX_TPA_SEGS_P5;
max_aggs = bp->max_tpa;
} else {
@@ -4986,6 +6463,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
req->max_aggs = cpu_to_le16(max_aggs);
req->min_agg_len = cpu_to_le32(512);
+ bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
}
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5002,50 +6480,43 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = rxr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
- }
}
static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return txr->tx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
- }
}
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
else
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
- GFP_KERNEL);
+ bp->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!bp->rss_indir_tbl)
return -ENOMEM;
+
return 0;
}
-static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
+ u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -5056,18 +6527,22 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+ if (rss_ctx)
+ rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
+ else
+ rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
- bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
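
ethtool_rxfh_indir_default() spreads entries round-robin, so the default table is simply entry i -> ring (i % max_rings), with any entries beyond max_entries zeroed as padding. A minimal sketch:

#include <stdint.h>

/* matches ethtool_rxfh_indir_default(): entry i -> ring (i % rings) */
static void fill_dflt_indir(uint32_t *tbl, unsigned int entries,
			    unsigned int rings)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		tbl[i] = i % rings;	/* 3 rings: 0,1,2,0,1,2,... */
}
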
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
- u16 i, tbl_size, max_ring = 0;
+ u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
@@ -5080,14 +6555,18 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
}
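
On P5+ the context count is a ceiling division of RX rings by RSS-table entries per context, with an explicit zero-rings case; bnxt_calc_nr_ring_pages(n - 1, d) computes DIV_ROUND_UP(n, d) for n > 0. Assuming 64 entries per context (stated here as an assumption about BNXT_RSS_TABLE_ENTRIES_P5):

/* DIV_ROUND_UP with an explicit zero case, as in the P5+ branch above */
static int nr_rss_ctxs(int rx_rings, int entries_per_ctx)
{
	if (!rx_rings)
		return 0;
	return (rx_rings + entries_per_ctx - 1) / entries_per_ctx;
}
/* with 64 entries per context: 0 -> 0, 64 -> 1, 65 -> 2 */
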
-static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
u16 i, j;
@@ -5100,8 +6579,8 @@ static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
}
}
-static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
- struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
__le16 *ring_tbl = vnic->rss_table;
struct bnxt_rx_ring_info *rxr;
@@ -5112,7 +6591,12 @@ static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5122,21 +6606,39 @@ static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
}
}
-static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void
+__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
- __bnxt_fill_hw_rss_tbl(bp, vnic);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
+ bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
+
+ if (bp->rss_hash_delta) {
+ req->hash_type = cpu_to_le32(bp->rss_hash_delta);
+ if (bp->rss_hash_cfg & bp->rss_hash_delta)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
+ else
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
+ } else {
+ req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ }
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
@@ -5144,21 +6646,15 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (rc)
return rc;
- if (set_rss) {
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
- req->hash_key_tbl_addr =
- cpu_to_le64(vnic->rss_hash_key_dma_addr);
- }
+ if (set_rss)
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -5172,10 +6668,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (!set_rss)
return hwrm_req_send(bp, req);
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
ring_tbl_map = vnic->rss_table_dma_addr;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
@@ -5194,9 +6687,29 @@ exit:
return rc;
}
-static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+
+ if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ /* all contexts are configured to the same hash_type; context 0 always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req)) {
+ bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
+ bp->rss_hash_delta = 0;
+ }
+ hwrm_req_drop(bp, req);
+}
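/*
 * Editorial note: the GNU "?:" (elvis) operator above keeps the cached
 * value when firmware reports a zero hash_type.  It is shorthand for:
 *
 *	u32 ht = le32_to_cpu(resp->hash_type);
 *
 *	bp->rss_hash_cfg = ht ? ht : bp->rss_hash_cfg;
 */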
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -5204,20 +6717,23 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req->enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- /* thresholds not implemented in firmware yet */
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
+
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -5226,10 +6742,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -5241,13 +6757,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -5260,7 +6777,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -5274,9 +6791,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -5286,7 +6803,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
req->default_rx_ring_id =
@@ -5305,8 +6822,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -5325,15 +6841,16 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
- ring = vnic_id - 1;
- else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = vnic->vnic_id - 1;
+ else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -5342,25 +6859,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
- req->vnic_id =
- cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -5369,15 +6886,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_free_one(bp, i);
+ bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
- unsigned int start_rx_ring_idx,
- unsigned int nr_rings)
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -5386,7 +6902,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_no_ring_grps;
/* map ring groups to this vnic */
@@ -5403,7 +6919,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -5421,7 +6937,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
int rc;
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
- bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
+ bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -5434,9 +6951,9 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
- bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
@@ -5445,16 +6962,34 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- (BNXT_CHIP_P5_THOR(bp) &&
+ (BNXT_CHIP_P5(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
else
- bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
}
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -5467,7 +7002,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
int rc;
u16 i;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
@@ -5500,7 +7035,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
struct hwrm_ring_grp_free_input *req;
u16 i;
- if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
@@ -5519,6 +7054,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -5550,6 +7109,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
+ u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -5560,65 +7120,46 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req->queue_id = cpu_to_le16(ring->queue_id);
+ if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+ req->cmpl_coal_cnt =
+ RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+ flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+ req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
/* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index];
req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
- } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ } else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -5644,7 +7185,7 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
if (BNXT_PF(bp)) {
struct hwrm_func_cfg_input *req;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -5666,14 +7207,34 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
}
}
+static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bp->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bp->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bp->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bp->cp_ring_mask;
+ break;
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+ }
+}
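/*
 * Editorial sketch, not part of the patch: for a 2^n-entry ring,
 * db_ring_mask is 2^n - 1 and db_epoch_mask is the next-higher bit (2^n),
 * which flips every time the producer index wraps.  The shift moves that
 * bit into the doorbell's epoch position.  Standalone illustration; the
 * DBR_EPOCH_SFT value here is an assumed example:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int DBR_EPOCH_SFT = 24;		/* assumed example value */
	uint32_t db_ring_mask = 1023;			/* 1024-entry ring */
	uint32_t db_epoch_mask = db_ring_mask + 1;	/* bit 10 */
	unsigned int db_epoch_shift =
		DBR_EPOCH_SFT - __builtin_ctz(db_epoch_mask);	/* ~ilog2() */

	uint32_t prod = 1024;	/* producer has wrapped exactly once */
	uint32_t epoch = (prod & db_epoch_mask) << db_epoch_shift;

	printf("epoch bit in doorbell: 0x%08x\n", epoch);	/* bit 24 set */
	return 0;
}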
+
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
u32 map_idx, u32 xid)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
- else
- db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -5690,6 +7251,11 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
db->db_key64 |= (u64)xid << DBR_XID_SFT;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ db->db_key64 |= DBR_VALID;
+
+ db->doorbell = bp->bar1 + bp->db_offset;
} else {
db->doorbell = bp->bar1 + map_idx * 0x80;
switch (ring_type) {
@@ -5705,6 +7271,82 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
}
+ bnxt_set_db_mask(bp, db, ring_type);
+}
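/*
 * Editorial sketch, not part of the patch: on P5+ the constant part of the
 * 64-bit doorbell (path, type, xid, plus DBR_VALID on P7) is precomputed
 * here, and each datapath write only ORs in the masked producer index.
 * Assuming a DB_RING_IDX() helper that masks with db_ring_mask, a TX
 * doorbell write is roughly:
 *
 *	writeq(db->db_key64 | DB_RING_IDX(db, txr->tx_prod), db->doorbell);
 */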
+
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bp->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
@@ -5713,7 +7355,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
int i, rc = 0;
u32 type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = HWRM_RING_ALLOC_NQ;
else
type = HWRM_RING_ALLOC_CMPL;
@@ -5743,93 +7385,66 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr, *cpr2;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- cpr = &bnapi->cp_ring;
- cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_TX_HDL;
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
- type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_cp_ring_info *cpr2;
-
- cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_RX_HDL;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
if (agg_rings) {
- type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 grp_idx = ring->grp_idx;
- u32 map_idx = grp_idx + bp->rx_nr_rings;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
if (rc)
goto err_out;
-
- bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
- ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
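/*
 * Editorial note: the ordering in bnxt_cancel_dim() matters.  A NAPI poll
 * may call net_dim(), which queues cp_ring.dim.work; synchronize_net()
 * waits for any such in-flight poll to finish, so the subsequent
 * cancel_work_sync() cannot race with a fresh re-queue.  The generic shape:
 *
 *	1. make the rings unreachable from the datapath;
 *	2. synchronize_net();             // drain concurrent NAPI pollers
 *	3. cancel_work_sync(&dim.work);   // now nobody can re-queue it
 */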
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -5863,66 +7478,109 @@ exit:
return 0;
}
-static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
{
- u32 type;
- int i;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
- if (!bp->bnapi)
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- }
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 type, cmpl_ring_id;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
- hwrm_ring_free_send_msg(bp, ring, type,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
+static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ u32 type;
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
+
+ bnxt_cancel_dim(bp);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
}
/* The completion rings are about to be freed. After that the
@@ -5931,7 +7589,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_NQ;
else
type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
@@ -5941,19 +7599,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -5964,6 +7612,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
bool shared);
@@ -5997,17 +7647,20 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx >>= 1;
if (cp < (rx + tx)) {
- bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6019,8 +7672,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -6049,61 +7703,51 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
- if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
+ if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- }
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -6111,49 +7755,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -6167,25 +7808,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -6196,37 +7835,24 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
- int cp = bp->cp_nr_rings;
- int ulp_msix, ulp_base;
-
- ulp_msix = bnxt_get_ulp_msix_num(bp);
- if (ulp_msix) {
- ulp_base = bnxt_get_ulp_msix_base(bp);
- cp += ulp_msix;
- if ((ulp_base + ulp_msix) > cp)
- cp = ulp_base + ulp_msix;
- }
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
int cp;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return bnxt_nq_rings_in_use(bp);
cp = bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6235,16 +7861,25 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
- int cp = bp->cp_nr_rings;
-
- if (!ulp_stat)
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+}
- if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
- return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
- return cp + ulp_stat;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
}
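/*
 * Editorial sketch, not part of the patch: worked example for the P5+
 * branch above, assuming bnxt_get_nr_rss_ctxs() returns one context per 64
 * ring-group entries (DIV_ROUND_UP semantics):
 */
#include <stdio.h>

int main(void)
{
	int grp = 8, vnic = 3;			/* 8 ring groups, 3 VNICs */
	int per_vnic = (grp + 63) / 64;		/* DIV_ROUND_UP(8, 64) == 1 */

	/* with BNXT_SUPPORTS_NTUPLE_VNIC, one set of contexts per VNIC */
	printf("rss_ctx = %d\n", per_vnic * vnic);	/* -> 3 */
	return 0;
}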
/* Check if a default RSS map needs to be set up. This function is only
@@ -6258,86 +7893,135 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2 + bp->num_rss_ctx;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
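/*
 * Editorial sketch, not part of the patch: the VNIC budget for, say,
 * rx_rings == 8 under the three cases above:
 *
 *	no RFS:                  1            (default VNIC only)
 *	legacy RFS (pre-P5+):    8 + 1 = 9    (one per RX ring + default)
 *	NTUPLE-VNIC capable:     2 + bp->num_rss_ctx
 */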
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
- return true;
+ int vnic, grp = rx;
/* Old firmware does not need RX ring reservations but we still
* need to set up a default RSS map when needed. With new firmware
* we go through RX ring reservations first and then set up the
* RSS map for the successfully reserved RX rings when needed.
*/
- if (!BNXT_NEW_RM(bp)) {
+ if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
+ return true;
+
+ if (!BNXT_NEW_RM(bp))
return false;
- }
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5)))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
return true;
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
+static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, old_rx_rings, rc;
+ int cp = bp->cp_nr_rings;
+ int ulp_msix = 0;
bool sh = false;
+ int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+ if (!ulp_msix)
+ bnxt_set_ulp_stat_ctxs(bp, 0);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ hwr.cp = cp + ulp_msix;
+ } else {
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ }
+
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -6349,16 +8033,23 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -6375,20 +8066,32 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+ int resv_msix, resv_ctx, ulp_ctxs;
+ struct bnxt_hw_resc *hw_resc;
+
+ hw_resc = &bp->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ ulp_msix = min_t(int, resv_msix, ulp_msix);
+ bnxt_set_ulp_msix_num(bp, ulp_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
+ ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
+ }
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -6396,37 +8099,33 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req->flags = cpu_to_le32(flags);
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
@@ -6437,20 +8136,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -6512,8 +8206,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u16 val, tmr, max, flags = hw_coal->flags;
u32 cmpl_params = coal_cap->cmpl_params;
- u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
@@ -6556,8 +8250,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
- if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
@@ -6622,10 +8314,40 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
return hwrm_req_send(bp, req_rx);
}
+static int
+bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+
+ req->ring_id = cpu_to_le16(ring_id);
+ return hwrm_req_send(bp, req);
+}
+
+static int
+bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i, rc;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ u16 ring_id;
+
+ ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ return rc;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ }
+ return 0;
+}
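/*
 * Editorial note: bnxt_for_each_napi_tx() (a helper macro in bnxt.h)
 * iterates every TX ring attached to this NAPI.  On pre-P5+ chips all of
 * them share one completion ring, so the early "return 0" after the first
 * successful send avoids redundant HWRM calls; on P5+ each TX ring has its
 * own completion ring and the loop visits them all.
 */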
+
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
- *req;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
int i, rc;
rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
@@ -6646,29 +8368,19 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_coal *hw_coal;
- u16 ring_id;
- req = req_rx;
- if (!bnapi->rx_ring) {
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req = req_tx;
- } else {
- ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
- }
- req->ring_id = cpu_to_le16(ring_id);
-
- rc = hwrm_req_send(bp, req);
+ if (!bnapi->rx_ring)
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
+ else
+ rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
if (rc)
break;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
- if (bnapi->rx_ring && bnapi->tx_ring) {
- req = req_tx;
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req->ring_id = cpu_to_le16(ring_id);
- rc = hwrm_req_send(bp, req);
+ if (bnapi->rx_ring && bnapi->tx_ring[0]) {
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
if (rc)
break;
}
@@ -6764,7 +8476,6 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
- u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -6778,16 +8489,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -6796,11 +8511,21 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
+ bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+ if (resp->roce_bidi_opt_mode &
+ FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
+ bp->cos0_cos1_shared = 1;
+ else
+ bp->cos0_cos1_shared = 0;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -6821,16 +8546,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (bp->db_size)
goto func_qcfg_exit;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ if (BNXT_CHIP_P5(bp)) {
if (BNXT_PF(bp))
- min_db_offset = DB_PF_OFFSET_P5;
+ bp->db_offset = DB_PF_OFFSET_P5;
else
- min_db_offset = DB_VF_OFFSET_P5;
+ bp->db_offset = DB_VF_OFFSET_P5;
}
bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1024);
if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
- bp->db_size <= min_db_offset)
+ bp->db_size <= bp->db_offset)
bp->db_size = pci_resource_len(bp->pdev, 2);
func_qcfg_exit:
@@ -6838,37 +8564,117 @@ func_qcfg_exit:
return rc;
}
-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
- struct hwrm_func_backing_store_qcaps_output *resp)
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
{
- struct bnxt_mem_init *mem_init;
- u16 init_mask;
- u8 init_val;
- u8 *offset;
- int i;
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
- init_val = resp->ctx_kind_initializer;
- init_mask = le16_to_cpu(resp->ctx_init_mask);
- offset = &resp->qp_init_offset;
- mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
- mem_init->init_val = init_val;
- mem_init->offset = BNXT_MEM_INVALID_OFFSET;
- if (!init_mask)
+ if (!ctxm->max_entries || ctxm->pg_info)
continue;
- if (i == BNXT_CTX_MEM_INIT_STAT)
- offset = &resp->stat_init_offset;
- if (init_mask & (1 << i))
- mem_init->offset = *offset * 4;
- else
- mem_init->init_val = 0;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+ return 0;
+}
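/*
 * Editorial sketch, not part of the patch: instance_bmap is a bitmap of
 * backing-store instances, so hweight32() yields the number of pg_info
 * slots to allocate, e.g.:
 */
#include <stdio.h>

int main(void)
{
	unsigned int instance_bmap = 0x5;	/* instances 0 and 2 */

	/* hweight32() equivalent */
	printf("pg_info slots: %d\n", __builtin_popcount(instance_bmap)); /* 2 */
	return 0;
}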
+
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force);
+
+#define BNXT_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ if (!ctx) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
}
- ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+
+ resp = hwrm_req_hold(bp, req);
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ u32 max_entries;
+ u16 entry_size;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ continue;
+ }
+ entry_size = le16_to_cpu(resp->entry_size);
+ max_entries = le32_to_cpu(resp->max_num_entries);
+ if (ctxm->mem_valid) {
+ if (!(flags & BNXT_CTX_MEM_PERSIST) ||
+ ctxm->entry_size != entry_size ||
+ ctxm->max_entries != max_entries)
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ else
+ continue;
+ }
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = entry_size;
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = max_entries;
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
+
+ctx_done:
+ hwrm_req_drop(bp, req);
+ return rc;
}
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
@@ -6877,9 +8683,13 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
struct hwrm_func_backing_store_qcaps_input *req;
int rc;
- if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
+ (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
if (rc)
return rc;
@@ -6887,48 +8697,84 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- int i, tqm_rings;
+ u8 init_val, init_idx = 0;
+ u16 init_mask;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = bp->ctx;
if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
- ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
- ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
- ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
- ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
- ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
- ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
- ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
- ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- le16_to_cpu(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ bp->ctx = ctx;
+ }
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
+ ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctxm->max_entries = ctxm->vnic_entries +
le16_to_cpu(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
- ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
- ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
- ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- le32_to_cpu(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- le32_to_cpu(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
- ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
- ctx->mrav_num_entries_units =
+ ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->vnic_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->stat_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctxm->entry_multiple = resp->tqm_entries_multiple;
+ if (!ctxm->entry_multiple)
+ ctxm->entry_multiple = 1;
+
+ memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctxm->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
- ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
- ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->mrav_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
- bnxt_init_ctx_initializer(ctx, resp);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
@@ -6936,16 +8782,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
- ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < tqm_rings; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
- bp->ctx = ctx;
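+ /* The fast-path TQM type inherits the slow-path TQM parameters;
+ * one instance bit is set per fast-path ring.
+ */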
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+ ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
} else {
rc = 0;
}
@@ -6984,6 +8825,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input *req;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
void **__req = (void **)&req;
u32 req_len = sizeof(*req);
__le32 *num_entries;
@@ -7005,82 +8847,102 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
req->enables = cpu_to_le32(enables);
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
- ctx_pg = &ctx->qp_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctx_pg = ctxm->pg_info;
req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
- req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
- req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
- req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+ req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+ req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->qpc_pg_size_qpc_lvl,
&req->qpc_page_dir);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+ req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
- ctx_pg = &ctx->srq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctx_pg = ctxm->pg_info;
req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
- req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+ req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->srq_pg_size_srq_lvl,
&req->srq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
- ctx_pg = &ctx->cq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctx_pg = ctxm->pg_info;
req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
- req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+ req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->cq_pg_size_cq_lvl,
&req->cq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
- ctx_pg = &ctx->vnic_mem;
- req->vnic_num_vnic_entries =
- cpu_to_le16(ctx->vnic_max_vnic_entries);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctx_pg = ctxm->pg_info;
+ req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req->vnic_num_ring_table_entries =
- cpu_to_le16(ctx->vnic_max_ring_table_entries);
- req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+ req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->vnic_pg_size_vnic_lvl,
&req->vnic_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
- ctx_pg = &ctx->stat_mem;
- req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
- req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctx_pg = ctxm->pg_info;
+ req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+ req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->stat_pg_size_stat_lvl,
&req->stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
- ctx_pg = &ctx->mrav_mem;
+ u32 units;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
- if (ctx->mrav_num_entries_units)
- flags |=
- FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
- req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ units = ctxm->mrav_num_entries_units;
+ if (units) {
+ u32 num_mr, num_ah = ctxm->mrav_av_entries;
+ u32 entries;
+
+ num_mr = ctx_pg->entries - num_ah;
+ entries = ((num_mr / units) << 16) | (num_ah / units);
+ req->mrav_num_entries = cpu_to_le32(entries);
+ flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+ }
+ req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->mrav_pg_size_mrav_lvl,
&req->mrav_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
- ctx_pg = &ctx->tim_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctx_pg = ctxm->pg_info;
req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
- req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->tim_pg_size_tim_lvl,
&req->tim_page_dir);
}
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req->tqm_sp_num_entries,
pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req->tqm_sp_page_dir,
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
+ ctx_pg = ctxm->pg_info;
i < BNXT_MAX_TQM_RINGS;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
- ctx_pg = ctx->tqm_mem[i];
+ req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
@@ -7104,7 +8966,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, struct bnxt_mem_init *mem_init)
+ u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -7142,7 +9004,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7157,12 +9019,42 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
}
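+/* Copy the circular range [head, tail) out of a paged context ring.
+ * With a two-level page table, the level-2 table covering the current
+ * head offset is looked up before each chunk is copied.
+ */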
+static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ size_t nr_pages = ctx_pg->nr_pages;
+ int page_size = rmem->page_size;
+ size_t len = 0, total_len = 0;
+ u16 depth = rmem->depth;
+
+ tail %= nr_pages * page_size;
+ do {
+ if (depth > 1) {
+ int i = head / (page_size * MAX_CTX_PAGES);
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ rmem = &pg_tbl->ring_mem;
+ }
+ len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
+ head += len;
+ offset += len;
+ total_len += len;
+ if (head >= nr_pages * page_size)
+ head = 0;
+ } while (head != tail);
+ return total_len;
+}
+
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
@@ -7192,41 +9084,236 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
-static void bnxt_free_ctx_mem(struct bnxt *bp)
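+/* Size one context type and allocate its page tables: the entry count
+ * is rounded up to the firmware's entry_multiple, clamped to
+ * [min_entries, max_entries], and one page-table set is allocated per
+ * instance bit.
+ */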
+static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+ if (!rc)
+ ctxm->mem_valid = 1;
+ return rc;
+}
+
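+/* Configure one backing store type with the V2 HWRM call, sending one
+ * request per instance bit; the final request of the final type carries
+ * BS_CFG_ALL_DONE so the firmware can commit the whole configuration.
+ */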
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
+ bnxt_bs_trace_avail(bp, ctxm->type)) {
+ struct bnxt_bs_trace_info *bs_trace;
+ u32 enables;
+
+ enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
+ req->enables = cpu_to_le32(enables);
+ bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
+ req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
+ }
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+ rc = hwrm_req_send(bp, req);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
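+/* Set up driver trace memory first and remember the last type that will
+ * be configured; if there is none, fall back to the last valid type.
+ * That type carries the all-done flag when every valid type is pushed
+ * to the firmware below.
+ */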
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
- int i;
+ struct bnxt_ctx_mem_type *ctxm;
+ u16 last_type = BNXT_CTX_INV;
+ int rc = 0;
+ u16 type;
- if (!ctx)
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!bnxt_bs_trace_avail(bp, type))
+ continue;
+ if (!ctxm->mem_valid) {
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
+ ctxm->max_entries, 1);
+ if (rc) {
+ netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
+ type);
+ continue;
+ }
+ bnxt_bs_trace_init(bp, ctxm);
+ }
+ last_type = type;
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (ctxm->mem_valid)
+ last_type = type;
+ }
+ if (last_type == BNXT_CTX_INV)
+ return 0;
+ }
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ if (!ctxm->mem_valid)
+ continue;
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/**
+ * __bnxt_copy_ctx_mem - copy host context memory
+ * @bp: The driver context
+ * @ctxm: The pointer to the context memory type
+ * @buf: The destination buffer or NULL to just obtain the length
+ * @offset: The buffer offset to copy the data to
+ * @head: The head offset of context memory to copy from
+ * @tail: The tail offset (last byte + 1) of context memory to end the copy
+ *
+ * This function is called for debugging purposes to dump the host context
+ * used by the chip.
+ *
+ * Return: Length of memory copied
+ */
+static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, void *buf,
+ size_t offset, size_t head, size_t tail)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ size_t len = 0, total_len = 0;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ return 0;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++) {
+ len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
+ tail);
+ offset += len;
+ total_len += len;
+ }
+ return total_len;
+}
+
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset)
+{
+ size_t tail = ctxm->max_entries * ctxm->entry_size;
+
+ return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
+}
+
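+/* Free one context type's page tables. Persistent memory (for example
+ * firmware trace buffers) survives unless a forced teardown is
+ * requested.
+ */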
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, n = 1;
+
+ ctxm->last = 0;
+
+ if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
return;
- if (ctx->tqm_mem[0]) {
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
- bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
- kfree(ctx->tqm_mem[0]);
- ctx->tqm_mem[0] = NULL;
+ ctx_pg = ctxm->pg_info;
+ if (ctx_pg) {
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
+ ctxm->mem_valid = 0;
}
+ memset(ctxm, 0, sizeof(*ctxm));
+}
+
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++)
+ bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
- bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ if (force) {
+ kfree(ctx);
+ bp->ctx = NULL;
+ }
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- struct bnxt_mem_init *init;
- u32 mem_size, ena, entries;
- u32 entries_sp, min;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
u8 pg_lvl = 1;
int i, rc;
@@ -7240,120 +9327,116 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ goto skip_legacy;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = 65536;
- extra_srqs = 8192;
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
- ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
- extra_qps;
- if (ctx->qp_entry_size) {
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- if (ctx->srq_entry_size) {
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- if (ctx->cq_entry_size) {
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->vnic_mem;
- ctx_pg->entries = ctx->vnic_max_vnic_entries +
- ctx->vnic_max_ring_table_entries;
- if (ctx->vnic_entry_size) {
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->stat_mem;
- ctx_pg->entries = ctx->stat_max_entries;
- if (ctx->stat_entry_size) {
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
- ctx_pg = &ctx->mrav_mem;
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = 1024 * 256;
- num_ah = 1024 * 128;
- ctx_pg->entries = num_mr + num_ah;
- if (ctx->mrav_entry_size) {
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
- if (rc)
- return rc;
- }
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
- if (ctx->mrav_num_entries_units)
- ctx_pg->entries =
- ((num_mr / ctx->mrav_num_entries_units) << 16) |
- (num_ah / ctx->mrav_num_entries_units);
-
- ctx_pg = &ctx->tim_mem;
- ctx_pg->entries = ctx->qp_mem.entries;
- if (ctx->tim_entry_size) {
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
- if (rc)
- return rc;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
}
+
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- min = ctx->tqm_min_entries_per_ring;
- entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
- 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
- entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
- entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
- ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = i ? entries : entries_sp;
- if (ctx->tqm_entry_size) {
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
- NULL);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
- }
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
- rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+skip_legacy:
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ rc = bnxt_backing_store_cfg_v2(bp);
+ else
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
@@ -7363,6 +9446,80 @@ skip_rdma:
return 0;
}
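+/* Tell the firmware where the host-DDR crash dump area lives: page
+ * size and table depth, the page-table base, and the total length.
+ */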
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ if (BNXT_PAGE_SIZE == 0x2000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
+ else if (BNXT_PAGE_SIZE == 0x10000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
+ else
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_mem = NULL;
+ }
+}
+
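+/* Allocate (or reuse) host memory for the firmware crash dump. The
+ * existing pages are kept when the firmware's reported dump length
+ * still fits.
+ */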
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ /* keep and use the existing pages */
+ if (bp->fw_crash_mem &&
+ mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
+ goto alloc_done;
+
+ if (bp->fw_crash_mem)
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ else
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
+ GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+alloc_done:
+ bp->fw_crash_len = mem_size;
+ return 0;
+}
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -7401,7 +9558,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
hw_resc->max_nqs = max_msix;
@@ -7429,7 +9586,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -7445,7 +9602,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
- if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+ !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -7458,16 +9616,21 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
- if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
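+ /* Both the partial direct access and 64-bit PHC capabilities report
+ * the reference clock register addresses in the response.
+ */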
+ if (flags &
+ (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ } else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
rc = -ENODEV;
goto exit;
}
+ ptp->rtc_configured =
+ (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
rc = bnxt_ptp_init(bp);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
@@ -7485,10 +9648,10 @@ no_ptp:
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
+ u32 flags, flags_ext, flags_ext2, flags_ext3;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -7506,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
@@ -7518,12 +9683,48 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
+ bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
+
+ flags_ext2 = le32_to_cpu(resp->flags_ext2);
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
+ bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
+ if (BNXT_PF(bp) &&
+ (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
+
+ flags_ext3 = le32_to_cpu(resp->flags_ext3);
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7541,6 +9742,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -7549,17 +9757,11 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
- __bnxt_hwrm_ptp_qcfg(bp);
+ bp->fw_cap |= BNXT_FW_CAP_PTP;
} else {
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
@@ -7573,21 +9775,51 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
}
+static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
+{
+ struct hwrm_dbg_qcaps_output *resp;
+ struct hwrm_dbg_qcaps_input *req;
+ int rc;
+
+ bp->fw_dbg_cap = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto hwrm_dbg_qcaps_exit;
+
+ bp->fw_dbg_cap = le32_to_cpu(resp->flags);
+
+hwrm_dbg_qcaps_exit:
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
+
+ bnxt_hwrm_dbg_qcaps(bp);
+
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
@@ -7628,6 +9860,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -7642,6 +9882,7 @@ static int __bnxt_alloc_fw_health(struct bnxt *bp)
if (!bp->fw_health)
return -ENOMEM;
+ mutex_init(&bp->fw_health->lock);
return 0;
}
@@ -7670,30 +9911,21 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
-bool bnxt_is_fw_healthy(struct bnxt *bp)
-{
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 fw_status;
-
- fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
- return false;
- }
-
- return true;
-}
-
static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 reg_type;
- if (!fw_health || !fw_health->status_reliable)
+ if (!fw_health)
return;
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
fw_health->status_reliable = false;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->resets_reliable = false;
}
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
@@ -7717,7 +9949,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -7750,6 +9982,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
int i;
bp->fw_health->status_reliable = false;
+ bp->fw_health->resets_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7763,6 +9996,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
if (reg_base == 0xffffffff)
return 0;
@@ -7770,6 +10004,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
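+/* Re-validate the health register mapping; when the firmware supports
+ * error recovery the register locations stay reliable, otherwise the
+ * GRC window mapping is probed again.
+ */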
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -7934,7 +10181,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -8004,6 +10251,17 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
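+ /* max_req_timeout is reported in seconds; store it in milliseconds */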
+ bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bp->hwrm_cmd_max_timeout)
+ bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -8113,7 +10371,7 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp)
int i;
/* Chip bug. Counter intermittently becomes 0. */
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ignore_zero = true;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8208,6 +10466,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
if (!rc) {
bp->fw_rx_stats_ext_size =
le16_to_cpu(resp_qs->rx_stat_size) / 8;
+ if (BNXT_FW_MAJ(bp) < 220 &&
+ bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
+ bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
+
bp->fw_tx_stats_ext_size = tx_stat_size ?
le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
} else {
@@ -8279,7 +10541,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -8294,7 +10556,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_set_rss(bp, i, false);
+ bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -8303,7 +10565,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
return;
bnxt_hwrm_clear_vnic_filter(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
/* clear all RSS setting before free vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
@@ -8312,7 +10574,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, false);
bnxt_hwrm_vnic_free(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_hwrm_vnic_ctx_free(bp);
}
@@ -8341,7 +10603,7 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
else
return -EINVAL;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -8359,7 +10621,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -8372,28 +10634,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
-static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -8401,26 +10662,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
- rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
@@ -8428,16 +10689,53 @@ vnic_setup_err:
return rc;
}
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
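+/* Push updated VNIC fields to the firmware; the caller's valid bitmap
+ * is used directly as the HWRM enables mask.
+ */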
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
- vnic_id, i, rc);
+ vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -8445,46 +10743,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic_id, rc);
- return rc;
- }
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
return rc;
- }
+
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return __bnxt_setup_vnic_p5(bp, vnic_id);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return __bnxt_setup_vnic_p5(bp, vnic);
else
- return __bnxt_setup_vnic(bp, vnic_id);
+ return __bnxt_setup_vnic(bp, vnic);
+}
+
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
+ struct bnxt_vnic_info *vnic;
int i, rc = 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
+ }
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -8493,22 +10802,148 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic = &bp->vnic_info[vnic_id];
vnic->flags |= BNXT_VNIC_RFS_FLAG;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
+ }
+ return rc;
+}
+
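+/* Tear down one RSS context's VNIC and RSS contexts. When the context
+ * is being deleted entirely, also drop the user ntuple filters bound
+ * to it and free its DMA'd indirection table.
+ */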
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all)
+{
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct bnxt_filter_base *usr_fltr, *tmp;
+ struct bnxt_ntuple_filter *ntp_fltr;
+ int i;
+
+ if (netif_running(bp->dev)) {
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
+ }
+ if (!all)
+ return;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
+ usr_fltr->fw_vnic_id == rss_ctx->index) {
+ ntp_fltr = container_of(usr_fltr,
+ struct bnxt_ntuple_filter,
+ base);
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+ }
+
+ if (vnic->rss_table)
+ dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ bp->num_rss_ctx--;
+}
+
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+ /* The ntuple VNIC always covers all the rx rings, so treat any
+ * ring id as belonging to it; a future filter may use that ring.
+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
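+/* Update the MRU of a VNIC if it maps the given rx ring; a non-zero
+ * MRU re-programs the VNIC's RSS first.
+ */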
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
}
- rc = bnxt_setup_vnic(bp, vnic_id);
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
if (rc)
- break;
+ return rc;
}
- return rc;
-#else
+
return 0;
-#endif
+}
+
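+/* Recreate every user RSS context after a firmware reset; a context
+ * that cannot be restored is deleted and reported lost to ethtool.
+ */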
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+ bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+ bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+ __bnxt_setup_vnic_p5(bp, vnic)) {
+ netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+ rss_ctx->index);
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
+ }
+ }
+}
+
+static void bnxt_clear_rss_ctxs(struct bnxt *bp)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, false);
+ }
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -8523,16 +10958,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
unsigned int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
- rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -8546,7 +10982,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -8575,15 +11011,20 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
}
- rc = bnxt_setup_vnic(bp, 0);
+ if (BNXT_VF(bp))
+ bnxt_hwrm_func_qcfg(bp);
+
+ rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
rc = bnxt_alloc_rfs_vnics(bp);
@@ -8603,12 +11044,18 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+ else
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out;
}
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8618,7 +11065,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8629,6 +11076,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -8693,8 +11141,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
return rc;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared)
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared)
{
int _rx = *rx, _tx = *tx;
@@ -8717,19 +11165,59 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
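+/* With MQPRIO, the TX rings of all TCs share completion rings; only
+ * XDP TX rings get their own. These helpers convert between TX ring
+ * and TX completion ring counts.
+ */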
+static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
+{
+ return (tx - tx_xdp) / tx_sets + tx_xdp;
+}
+
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
+{
+ int tcs = bp->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+ return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
+}
+
+static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
+{
+ int tcs = bp->num_tc;
+
+ return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
+ bp->tx_nr_rings_xdp;
+}
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool sh)
+{
+ int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
+
+ if (tx_cp != *tx) {
+ int tx_saved = tx_cp, rc;
+
+ rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
+ if (rc)
+ return rc;
+ if (tx_cp != tx_saved)
+ *tx = bnxt_num_cp_to_tx(bp, tx_cp);
+ return 0;
+ }
+ return __bnxt_trim_rings(bp, rx, tx, max, sh);
+}
+
static void bnxt_setup_msix(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
count = bp->tx_nr_rings_per_tc;
- off = i * count;
+ off = BNXT_TC_TO_RING_BASE(bp, i);
netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -8751,20 +11239,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
-static void bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp);
+
+static int bnxt_change_msix(struct bnxt *bp, int total)
{
- const int len = sizeof(bp->irq_tbl[0].name);
+ struct msi_map map;
+ int i;
- if (netdev_get_num_tc(bp->dev))
- netdev_reset_tc(bp->dev);
+ /* add MSIX to the end if needed */
+ for (i = bp->total_irqs; i < total; i++) {
+ map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
+ if (map.index < 0)
+ return bp->total_irqs;
+ bp->irq_tbl[i].vector = map.virq;
+ bp->total_irqs++;
+ }
- snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
- 0);
- bp->irq_tbl[0].handler = bnxt_inta;
+ /* trim MSIX from the end if needed */
+ for (i = bp->total_irqs; i > total; i--) {
+ map.index = i - 1;
+ map.virq = bp->irq_tbl[i - 1].vector;
+ pci_msix_free_irq(bp->pdev, map);
+ bp->total_irqs--;
+ }
+ return bp->total_irqs;
}
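bnxt_change_msix() relies on dynamic MSI-X allocation: vectors are added or freed only at the tail of the table, so IRQs already requested keep their vectors and do not need to be torn down. A condensed, non-authoritative sketch of that grow/shrink pattern using the same kernel APIs, with a hypothetical struct nic standing in for the driver context:

	#include <linux/pci.h>
	#include <linux/msi.h>

	struct nic {
		struct pci_dev *pdev;
		unsigned int nvecs;	/* vectors currently allocated */
		int vectors[64];	/* Linux IRQ number per index */
	};

	/* Resize the MSI-X allocation to @want; vectors below the
	 * resize point are untouched, so their handlers keep running.
	 */
	static unsigned int nic_resize_msix(struct nic *nic, unsigned int want)
	{
		struct msi_map map;
		unsigned int i;

		for (i = nic->nvecs; i < want; i++) {	/* grow at the tail */
			map = pci_msix_alloc_irq_at(nic->pdev, i, NULL);
			if (map.index < 0)
				break;			/* partial growth is OK */
			nic->vectors[i] = map.virq;
			nic->nvecs++;
		}
		for (i = nic->nvecs; i > want; i--) {	/* trim from the tail */
			map.index = i - 1;
			map.virq = nic->vectors[i - 1];
			pci_msix_free_irq(nic->pdev, map);
			nic->nvecs--;
		}
		return nic->nvecs;	/* caller compares against @want */
	}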
-static int bnxt_init_int_mode(struct bnxt *bp);
-
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -8775,16 +11275,12 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- bnxt_setup_msix(bp);
- else
- bnxt_setup_inta(bp);
+ bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
return bp->hw_resc.max_rsscos_ctxs;
@@ -8794,7 +11290,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
return bp->hw_resc.max_vnics;
}
-#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -8810,7 +11305,7 @@ static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
unsigned int cp = bp->hw_resc.max_cp_rings;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
cp -= bnxt_get_ulp_msix_num(bp);
return cp;
@@ -8820,7 +11315,7 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
@@ -8836,7 +11331,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
unsigned int cp;
cp = bnxt_get_max_func_cp_rings_for_en(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return cp - bp->rx_nr_rings - bp->tx_nr_rings;
else
return cp - bp->cp_nr_rings;
@@ -8847,19 +11342,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
-int bnxt_get_avail_msix(struct bnxt *bp, int num)
+static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
- int max_cp = bnxt_get_max_func_cp_rings(bp);
int max_irq = bnxt_get_max_func_irqs(bp);
int total_req = bp->cp_nr_rings + num;
- int max_idx, avail_msix;
-
- max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
- max_idx = min_t(int, bp->total_irqs, max_cp);
- avail_msix = max_idx - bp->cp_nr_rings;
- if (!BNXT_NEW_RM(bp) || avail_msix >= num)
- return avail_msix;
if (max_irq < total_req) {
num = max_irq - bp->cp_nr_rings;
@@ -8877,10 +11363,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
-static int bnxt_init_msix(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
- struct msix_entry *msix_ent;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -8890,29 +11375,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
- msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!msix_ent)
- return -ENOMEM;
-
- for (i = 0; i < total_vecs; i++) {
- msix_ent[i].entry = i;
- msix_ent[i].vector = 0;
- }
-
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
- total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+ total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
+ PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
- bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ tbl_size = total_vecs;
+ if (pci_msix_can_alloc_dyn(bp->pdev))
+ tbl_size = max;
+ bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
- bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -8921,100 +11401,238 @@ static int bnxt_init_msix(struct bnxt *bp)
if (rc)
goto msix_setup_exit;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
bp->cp_nr_rings = (min == 1) ?
- max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
} else {
rc = -ENOMEM;
goto msix_setup_exit;
}
- bp->flags |= BNXT_FLAG_USING_MSIX;
- kfree(msix_ent);
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- pci_disable_msix(bp->pdev);
- kfree(msix_ent);
- return rc;
-}
-
-static int bnxt_init_inta(struct bnxt *bp)
-{
- bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl)
- return -ENOMEM;
-
- bp->total_irqs = 1;
- bp->rx_nr_rings = 1;
- bp->tx_nr_rings = 1;
- bp->cp_nr_rings = 1;
- bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->irq_tbl[0].vector = bp->pdev->irq;
- return 0;
-}
-
-static int bnxt_init_int_mode(struct bnxt *bp)
-{
- int rc = -ENODEV;
-
- if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_init_msix(bp);
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
- /* fallback to INTA */
- rc = bnxt_init_inta(bp);
- }
+ pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
+ pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ bool irq_change = false;
+ int tcs = bp->num_tc;
+ int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (irq_re_init && BNXT_NEW_RM(bp) &&
- bnxt_get_num_msix(bp) != bp->total_irqs) {
- bnxt_ulp_irq_stop(bp);
- bnxt_clear_int_mode(bp);
- irq_cleared = true;
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ irqs_required = ulp_msix + bp->cp_nr_rings;
+ } else {
+ irqs_required = bnxt_get_num_msix(bp);
+ }
+
+ if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
+ irq_change = true;
+ if (!pci_msix_can_alloc_dyn(bp->pdev)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ irq_cleared = true;
+ }
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
+ } else if (irq_change && !rc) {
+ if (bnxt_change_msix(bp, irqs_required) != irqs_required)
+ rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->num_tc = 0;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
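The stop/start pair above uses a publish-then-wait discipline: the ring state is published with WRITE_ONCE() and synchronize_net() then waits out one RCU grace period, so no NAPI poller can still be acting on the old state afterwards. A stripped-down sketch of the pattern (struct ring and its state bit are hypothetical, not driver structures):

	#include <linux/netdevice.h>
	#include <linux/rcupdate.h>

	struct ring {
		unsigned long dev_state;  /* read with READ_ONCE() in the datapath */
	};

	#define RING_CLOSING	1UL

	static void ring_quiesce(struct ring *r)
	{
		WRITE_ONCE(r->dev_state, RING_CLOSING);
		synchronize_net();	/* every in-flight poller has now exited */
	}

	static void ring_resume(struct ring *r)
	{
		WRITE_ONCE(r->dev_state, 0);
		synchronize_net();	/* pollers observe the ring as open again */
	}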
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
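The notifier pair above is how the driver learns that a vector was migrated to another CPU, so the TPH steering tag can be recomputed for the new target. The registration mechanism itself is generic; a stripped-down sketch with a hypothetical per-vector context (passing NULL to irq_set_affinity_notifier() unregisters, as bnxt_release_irq_notifier() does):

	#include <linux/interrupt.h>
	#include <linux/kref.h>

	struct vec_ctx {
		struct irq_affinity_notify notify;	/* must outlive the IRQ */
		unsigned int irq;
	};

	static void vec_affinity_changed(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
	{
		struct vec_ctx *vc = container_of(notify, struct vec_ctx, notify);

		/* runs from a workqueue after the affinity actually changed */
		pr_info("irq %u now steered to CPU %u\n", vc->irq,
			cpumask_first(mask));
	}

	static void vec_affinity_release(struct kref *ref)
	{
		/* last put: notifier unregistered or the IRQ was freed */
	}

	static void vec_register(struct vec_ctx *vc, unsigned int irq)
	{
		vc->irq = irq;
		vc->notify.notify = vec_affinity_changed;
		vc->notify.release = vec_affinity_release;
		irq_set_affinity_notifier(irq, &vc->notify);
	}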
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -9033,24 +11651,29 @@ static void bnxt_free_irq(struct bnxt *bp)
irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
{
+ struct cpu_rmap *rmap = NULL;
int i, j, rc = 0;
unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
rc = bnxt_setup_int_mode(bp);
if (rc) {
@@ -9061,42 +11684,59 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- flags = IRQF_SHARED;
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
-#ifdef CONFIG_RFS_ACCEL
- if (rmap && bp->bnapi[i]->rx_ring) {
+ if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
+ rmap && bp->bnapi[i]->rx_ring) {
rc = irq_cpu_rmap_add(rmap, irq->vector);
if (rc)
netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
j);
j++;
}
-#endif
+
rc = request_irq(irq->vector, irq->handler, flags, irq->name,
bp->bnapi[i]);
if (rc)
break;
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
- rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
- "Set affinity failed, IRQ = %d\n",
+ "Update affinity hint failed, IRQ = %d\n",
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
@@ -9109,12 +11749,17 @@ static void bnxt_del_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -9122,29 +11767,26 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
- int i;
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
+ int i;
- if (bp->flags & BNXT_FLAG_USING_MSIX) {
- int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- poll_fn = bnxt_poll_p5;
- else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- cp_nr_rings--;
- for (i = 0; i < cp_nr_rings; i++) {
- bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
- }
- if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
- }
- } else {
- bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
+ for (i = 0; i < cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
+ }
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -9157,11 +11799,15 @@ static void bnxt_disable_napi(struct bnxt *bp)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
- napi_disable(&bp->bnapi[i]->napi);
- if (bp->bnapi[i]->rx_ring)
- cancel_work_sync(&cpr->dim.work);
+ cpr = &bnapi->cp_ring;
+ if (bnapi->tx_fault)
+ cpr->sw_stats->tx.tx_resets++;
+ if (bnapi->in_reset)
+ cpr->sw_stats->rx.rx_resets++;
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -9174,16 +11820,16 @@ static void bnxt_enable_napi(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
+ bnapi->tx_fault = 0;
+
cpr = &bnapi->cp_ring;
- if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
bnapi->in_reset = false;
if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -9218,7 +11864,7 @@ void bnxt_tx_enable(struct bnxt *bp)
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
@@ -9246,9 +11892,9 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
-static void bnxt_report_link(struct bnxt *bp)
+void bnxt_report_link(struct bnxt *bp)
{
- if (bp->link_info.link_up) {
+ if (BNXT_LINK_IS_UP(bp)) {
const char *signal = "";
const char *flow_ctrl;
const char *duplex;
@@ -9282,7 +11928,10 @@ static void bnxt_report_link(struct bnxt *bp)
signal = "(NRZ) ";
break;
case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
- signal = "(PAM4) ";
+ signal = "(PAM4 56Gbps) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
+ signal = "(PAM4 112Gbps) ";
break;
default:
break;
@@ -9310,7 +11959,9 @@ static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
if (!resp->supported_speeds_auto_mode &&
!resp->supported_speeds_force_mode &&
!resp->supported_pam4_speeds_auto_mode &&
- !resp->supported_pam4_speeds_force_mode)
+ !resp->supported_pam4_speeds_force_mode &&
+ !resp->supported_speeds2_auto_mode &&
+ !resp->supported_speeds2_force_mode)
return true;
return false;
}
@@ -9334,12 +11985,12 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- bp->phy_flags = resp->flags;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -9356,6 +12007,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
/* Phy re-enabled, reprobe the speeds */
link_info->support_auto_speeds = 0;
link_info->support_pam4_auto_speeds = 0;
+ link_info->support_auto_speeds2 = 0;
}
}
if (resp->supported_speeds_auto_mode)
@@ -9364,6 +12016,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->supported_pam4_speeds_auto_mode)
link_info->support_pam4_auto_speeds =
le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
+ if (resp->supported_speeds2_auto_mode)
+ link_info->support_auto_speeds2 =
+ le16_to_cpu(resp->supported_speeds2_auto_mode);
bp->port_count = resp->port_cnt;
@@ -9372,6 +12027,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -9379,13 +12054,41 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported)
return ((supported | diff) != supported);
}
+static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ /* Check if any advertised speeds are no longer supported. The caller
+ * holds the link_lock mutex, so we can modify link_info settings.
+ */
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds2)) {
+ link_info->advertising = link_info->support_auto_speeds2;
+ return true;
+ }
+ return false;
+ }
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds)) {
+ link_info->advertising = link_info->support_auto_speeds;
+ return true;
+ }
+ if (bnxt_support_dropped(link_info->advertising_pam4,
+ link_info->support_pam4_auto_speeds)) {
+ link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+ return true;
+ }
+ return false;
+}
+
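bnxt_support_dropped() is a bitmask subset test: with diff = advertising ^ supported, (supported | diff) differs from supported exactly when advertising carries a bit that supported lacks, i.e. the test is equivalent to (advertising & ~supported) != 0. A tiny self-check of that identity (the speed-mask values are made up):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors bnxt_support_dropped(): true iff @adv has a bit
	 * that @sup lacks, i.e. an advertised speed was dropped.
	 */
	static int support_dropped(uint16_t adv, uint16_t sup)
	{
		uint16_t diff = adv ^ sup;

		return (sup | diff) != sup;	/* same as (adv & ~sup) != 0 */
	}

	int main(void)
	{
		assert(!support_dropped(0x0002, 0x000a));  /* subset: fine */
		assert(support_dropped(0x0008, 0x0002));   /* 0x8 unsupported */
		printf("bitmask subset check behaves as expected\n");
		return 0;
	}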
int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
- u8 link_up = link_info->link_up;
- bool support_changed = false;
+ u8 link_state = link_info->link_state;
+ bool support_changed;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
@@ -9396,6 +12099,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+ rc = 0;
+ }
return rc;
}
@@ -9410,18 +12117,25 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex_cfg;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
link_info->link_speed = le16_to_cpu(resp->link_speed);
- else
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ link_info->active_lanes = resp->active_lanes;
+ } else {
link_info->link_speed = 0;
+ link_info->active_lanes = 0;
+ }
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
link_info->force_pam4_link_speed =
le16_to_cpu(resp->force_pam4_link_speed);
+ link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
+ link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->auto_pam4_link_speeds =
le16_to_cpu(resp->auto_pam4_link_speed_mask);
+ link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
link_info->lp_auto_pam4_link_speeds =
@@ -9438,7 +12152,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -9447,8 +12161,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -9458,8 +12171,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -9481,33 +12193,21 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+ /* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
hwrm_req_drop(bp, req);
if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
- /* Check if any advertised speeds are no longer supported. The caller
- * holds the link_lock mutex, so we can modify link_info settings.
- */
- if (bnxt_support_dropped(link_info->advertising,
- link_info->support_auto_speeds)) {
- link_info->advertising = link_info->support_auto_speeds;
- support_changed = true;
- }
- if (bnxt_support_dropped(link_info->advertising_pam4,
- link_info->support_pam4_auto_speeds)) {
- link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
- support_changed = true;
- }
+ support_changed = bnxt_support_speed_dropped(link_info);
if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
bnxt_hwrm_set_link_setting(bp, true, false);
return 0;
@@ -9572,7 +12272,11 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
{
if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
- if (bp->link_info.advertising) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
+ req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
+ } else if (bp->link_info.advertising) {
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
}
@@ -9586,7 +12290,12 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
- if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
+ netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
+ (u32)bp->link_info.req_link_speed);
+ } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
} else {
@@ -9632,7 +12341,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -9688,11 +12397,20 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return rc;
req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_req_send(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ /* Device is not obliged to bring the link down in certain scenarios, even
+ * when forced. Setting the state unknown is consistent with
+ * driver startup and will force link state to be reported
+ * during subsequent open based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
}
-static int bnxt_fw_init_one(struct bnxt *bp);
-
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
#ifdef CONFIG_TEE_BNXT_FW
@@ -9739,15 +12457,56 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ if (!BNXT_NEW_RM(bp))
+ return; /* no resource reservations required */
+
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+}
+
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ bnxt_clear_reservations(bp, fw_reset);
+
+ return rc;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
struct hwrm_func_drv_if_change_input *req;
- bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
+ bool fw_reset;
u32 flags = 0;
+ fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT);
+ bp->fw_reset_state = 0;
+
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -9789,24 +12548,26 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- bnxt_ulp_stop(bp);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
+ bnxt_ulp_irq_stop(bp);
+ bnxt_free_ctx_mem(bp, false);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -9814,33 +12575,10 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq() */
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
- }
- if (BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
-
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- if (!fw_reset) {
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
- }
}
+ rc = bnxt_cancel_reservations(bp, fw_reset);
}
return rc;
}
@@ -9969,97 +12707,27 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
-#ifdef CONFIG_BNXT_HWMON
-static ssize_t bnxt_show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct hwrm_temp_monitor_query_output *resp;
- struct hwrm_temp_monitor_query_input *req;
- struct bnxt *bp = dev_get_drvdata(dev);
- u32 len = 0;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (rc)
- return rc;
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
- hwrm_req_drop(bp, req);
- if (rc)
- return rc;
- return len;
-}
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
-
-static struct attribute *bnxt_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(bnxt);
-
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
- if (bp->hwmon_dev) {
- hwmon_device_unregister(bp->hwmon_dev);
- bp->hwmon_dev = NULL;
- }
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
- struct hwrm_temp_monitor_query_input *req;
- struct pci_dev *pdev = bp->pdev;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (!rc)
- rc = hwrm_req_send_silent(bp, req);
- if (rc == -EACCES || rc == -EOPNOTSUPP) {
- bnxt_hwmon_close(bp);
- return;
- }
-
- if (bp->hwmon_dev)
- return;
-
- bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
- DRV_MODULE_NAME, bp,
- bnxt_groups);
- if (IS_ERR(bp->hwmon_dev)) {
- bp->hwmon_dev = NULL;
- dev_warn(&pdev->dev, "Cannot register hwmon device\n");
- }
-}
-#else
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
-}
-#endif
-
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
@@ -10093,26 +12761,21 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
- if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
- link_info->req_link_speed != link_info->force_link_speed)
- update_link = true;
- else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
- link_info->req_link_speed != link_info->force_pam4_link_speed)
+ if (bnxt_force_speed_updated(link_info))
update_link = true;
if (link_info->req_duplex != link_info->duplex_setting)
update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
- if (link_info->advertising != link_info->auto_link_speeds ||
- link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
+ if (bnxt_auto_speed_updated(link_info))
update_link = true;
}
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -10131,20 +12794,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
-/* Common routine to pre-map certain register block to different GRC window.
- * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
- * in PF and 3 windows in VF that can be customized to map in different
- * register blocks.
- */
-static void bnxt_preset_reg_win(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- /* CAG registers map to GRC window #4 */
- writel(BNXT_CAG_REG_BASE,
- bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
- }
-}
-
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -10169,11 +12818,97 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
+static int bnxt_set_xps_mapping(struct bnxt *bp)
+{
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ unsigned int q_idx, map_idx, cpu, i;
+ const struct cpumask *cpu_mask_ptr;
+ int nr_cpus = num_online_cpus();
+ cpumask_t *q_map;
+ int rc = 0;
+
+ q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
+ if (!q_map)
+ return -ENOMEM;
+
+ /* Create CPU mask for all TX queues across MQPRIO traffic classes.
+ * Each TC has the same number of TX queues. The nth TX queue for each
+ * TC will have the same CPU mask.
+ */
+ for (i = 0; i < nr_cpus; i++) {
+ map_idx = i % bp->tx_nr_rings_per_tc;
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_mask_ptr = get_cpu_mask(cpu);
+ cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
+ }
+
+ /* Register CPU mask for each TX queue except the ones marked for XDP */
+ for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
+ map_idx = q_idx % bp->tx_nr_rings_per_tc;
+ rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
+ if (rc) {
+ netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
+ q_idx);
+ break;
+ }
+ }
+
+ kfree(q_map);
+
+ return rc;
+}
+
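The modulo folding in bnxt_set_xps_mapping() gives the nth TX queue of every traffic class the same CPU mask, with online CPUs spread round-robin across the per-TC ring indices. A tiny illustration of the fold, assuming 8 CPUs, 4 rings per TC and 2 TCs (all values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		int nr_cpus = 8, per_tc = 4, real_num_tx_queues = 8;
		int q, cpu;

		for (q = 0; q < real_num_tx_queues; q++) {
			printf("txq %d (tc %d) <- CPUs:", q, q / per_tc);
			for (cpu = 0; cpu < nr_cpus; cpu++)
				if (cpu % per_tc == q % per_tc)
					printf(" %d", cpu);  /* shared per TC */
			printf("\n");
		}
		return 0;
	}

Running this shows txq 0 and txq 4 (the first queue of each TC) both mapped to CPUs 0 and 4, and so on, which is the intent stated in the comment above.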
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -10186,13 +12921,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_USING_MSIX)) {
- /* disable RFS if falling back to INTA */
- bp->dev->hw_features &= ~NETIF_F_NTUPLE;
- bp->flags &= ~BNXT_FLAG_RFS;
- }
+ /* Make adjustments if reserved TX rings are fewer than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -10231,9 +12967,19 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- if (irq_re_init)
+ if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
+ rc = bnxt_set_xps_mapping(bp);
+ if (rc)
+ netdev_warn(bp->dev, "failed to set xps mapping\n");
+ }
+ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+ if (!static_key_enabled(&bnxt_xdp_locking_key))
+ static_branch_enable(&bnxt_xdp_locking_key);
+ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+ static_branch_disable(&bnxt_xdp_locking_key);
+ }
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
@@ -10247,6 +12993,11 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_hwrm_realloc_rss_ctx_vnic(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -10259,7 +13010,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -10270,14 +13020,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC halfway by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -10289,13 +13039,17 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ bnxt_init_napi(bp);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10303,22 +13057,24 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
- dev_close(bp->dev);
+ bnxt_free_mem(bp, true);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
+ bnxt_del_napi(bp);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
-static void bnxt_reenable_sriov(struct bnxt *bp)
+void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -10354,12 +13110,10 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_queue_sp_work(bp,
+ BNXT_RESTART_ULP_SP_EVENT);
}
- bnxt_hwmon_open(bp);
}
return rc;
@@ -10389,19 +13143,23 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp);
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
- del_timer_sync(&bp->timer);
+ timer_delete_sync(&bp->timer);
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi && irq_re_init)
+ if (bp->bnapi && irq_re_init) {
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
+ bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
+ }
if (irq_re_init) {
bnxt_free_irq(bp);
bnxt_del_napi(bp);
@@ -10409,17 +13167,16 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_mem(bp, irq_re_init);
}
-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
- int rc = 0;
-
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -10427,22 +13184,24 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
+ int rc;
+
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
!bp->sriov_cfg,
BNXT_SRIOV_CFG_WAIT_TMO);
- if (rc)
- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ if (!rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
+ else if (rc < 0)
+ netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
}
#endif
__bnxt_close_nic(bp, irq_re_init, link_re_init);
- return rc;
}
static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
bnxt_hwrm_if_change(bp, false);
@@ -10508,7 +13267,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -10539,12 +13298,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
- case SIOCSHWTSTAMP:
- return bnxt_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return bnxt_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -10586,8 +13339,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
stats->rx_dropped +=
- cpr->sw_stats.rx.rx_netpoll_discards +
- cpr->sw_stats.rx.rx_oom_discards;
+ cpr->sw_stats->rx.rx_netpoll_discards +
+ cpr->sw_stats->rx.rx_oom_discards;
}
}
@@ -10650,10 +13403,39 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
+static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
+ u64 *hw_stats = cpr->stats.sw_stats;
+
+ stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
+ stats->rx_total_resets += sw_stats->rx.rx_resets;
+ stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
+ stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
+ stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
+ stats->rx_total_ring_discards +=
+ BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
+ stats->tx_total_resets += sw_stats->tx.tx_resets;
+ stats->tx_total_ring_discards +=
+ BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
+ stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
+}
+
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
+}
+
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -10687,7 +13469,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -10714,7 +13496,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -10731,23 +13513,21 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
if (mask != vnic->rx_mask || uc_update || mc_update) {
vnic->rx_mask = mask;
- set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
}
}
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct hwrm_cfa_l2_filter_free_input *req;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -10759,16 +13539,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
if (!uc_update)
goto skip_uc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 1; i < vnic->uc_filter_count; i++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[i];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
- hwrm_req_drop(bp, req);
vnic->uc_filter_count = 1;
@@ -10787,21 +13563,31 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
- rc);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+ else
+ netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+ rc = 0;
+ } else {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ }
vnic->uc_filter_count = i;
return rc;
}
}
+ if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc:
if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
@@ -10835,7 +13621,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
@@ -10845,30 +13631,33 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
return true;
return false;
}
/* If runtime conditions support RFS */
-static bool bnxt_rfs_capable(struct bnxt *bp)
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
-#ifdef CONFIG_RFS_ACCEL
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+
+ if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
+ if (new_rss_ctx)
+ hwr.vnic++;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -10879,19 +13668,24 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ /* Do not reduce VNIC and RSS ctx reservations. There is a FW
+ * issue that will mess up the default VNIC if we reduce the
+ * reservations.
+ */
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
-#else
- return false;
-#endif
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -10900,10 +13694,10 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
@@ -10912,7 +13706,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
@@ -10929,14 +13723,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -10952,19 +13756,24 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
(flags & BNXT_FLAG_TPA) == 0 ||
- (bp->flags & BNXT_FLAG_CHIP_P5))
+ (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
re_init = true;
}
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -10975,14 +13784,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -10999,6 +13806,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ struct hop_jumbo_hdr *jhdr;
int hdr_count = 0;
u8 *nexthdr;
int start;
@@ -11026,9 +13834,27 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
if (hdrlen > 64)
return false;
+
+ /* The ext header may be a hop-by-hop header inserted for
+ * big TCP purposes. This will be removed before sending
+ * from the NIC, so do not count it.
+ */
+ if (*nexthdr == NEXTHDR_HOP) {
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ goto increment_hdr;
+
+ jhdr = (struct hop_jumbo_hdr *)hp;
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ goto increment_hdr;
+
+ goto next_hdr;
+ }
+increment_hdr:
+ hdr_count++;
+next_hdr:
nexthdr = &hp->nexthdr;
start += hdrlen;
- hdr_count++;
}
if (nextp) {
/* Caller will check inner protocol */
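For reference, the Big TCP hop-by-hop option being skipped here has this layout (a sketch matching the include/net/ipv6.h definition from the Big TCP series; treat the exact field names as an assumption):

	struct hop_jumbo_hdr {
		u8	nexthdr;
		u8	hdrlen;			/* 0: the option fits in 8 bytes */
		u8	tlv_type;		/* IPV6_TLV_JUMBO (0xc2) */
		u8	tlv_len;		/* 4 */
		__be32	jumbo_payload_len;
	};

The three checks above (tlv_type, hdrlen == 0, nexthdr == IPPROTO_TCP) test exactly this shape; any other hop-by-hop option is counted as an ordinary extension header.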
@@ -11048,9 +13874,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
__be16 udp_port = uh->dest;
- if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
+ udp_port != bp->vxlan_gpe_port)
return false;
- if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ if (skb->inner_protocol == htons(ETH_P_TEB)) {
struct ethhdr *eh = inner_eth_hdr(skb);
switch (eh->h_proto) {
@@ -11061,6 +13888,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
skb_inner_network_offset(skb),
NULL);
}
+ } else if (skb->inner_protocol == htons(ETH_P_IP)) {
+ return true;
+ } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
}
return false;
}
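The old code keyed off inner_protocol_type == ENCAP_TYPE_ETHER; comparing skb->inner_protocol directly also lets the new ETH_P_IP/ETH_P_IPV6 branches handle VXLAN-GPE payloads that carry no inner Ethernet header. A simplified sketch of where these values come from in the tunnel transmit path (illustrative, not driver code):

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));	/* inner L2 frame, e.g. VXLAN */
	skb_set_inner_protocol(skb, htons(ETH_P_IP));	/* bare IPv4 payload, e.g. VXLAN-GPE */
	skb_set_inner_protocol(skb, htons(ETH_P_IPV6));	/* bare IPv6 payload */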
@@ -11181,15 +14013,13 @@ static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int i = bnapi->index;
-
- if (!txr)
- return;
+ struct bnxt_tx_ring_info *txr;
+ int i = bnapi->index, j;
- netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
- i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
- txr->tx_cons);
+ bnxt_for_each_napi_tx(j, bnapi, txr)
+ netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
+ txr->tx_cons);
}
static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
@@ -11208,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int i = bnapi->index;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
+ int i = bnapi->index, j;
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ cpr2 = &cpr->cp_ring_arr[j];
+ if (!cpr2->bnapi)
+ continue;
+ netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, j, cpr2->cp_ring_struct.fw_ring_id,
+ cpr2->cp_raw_cons);
+ }
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
@@ -11256,17 +14094,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
- int rc;
-
- if (silent) {
- bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- bnxt_ulp_start(bp, rc);
- }
+ bnxt_close_nic(bp, !silent, false);
+ bnxt_open_nic(bp, !silent, false);
}
}
@@ -11275,13 +14104,13 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
struct bnxt *bp = netdev_priv(dev);
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}
static void bnxt_fw_health_check(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@@ -11295,26 +14124,29 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat)
+ if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
+ fw_health->arrests++;
goto fw_reset;
+ }
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt)
+ if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
+ fw_health->discoveries++;
goto fw_reset;
+ }
fw_health->tmr_counter = fw_health->tmr_multiplier;
return;
fw_reset:
- set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
}
static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = from_timer(bp, t, timer);
+ struct bnxt *bp = timer_container_of(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
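timer_container_of() is the successor to from_timer(); both recover the enclosing structure from the timer_list pointer embedded in it. A sketch of the expansion (assumed shape, per include/linux/timer.h):

	#define timer_container_of(var, callback_timer, timer_fieldname) \
		container_of(callback_timer, typeof(*var), timer_fieldname)

	/* so the converted line is equivalent to: */
	struct bnxt *bp = container_of(t, struct bnxt, timer);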
@@ -11326,21 +14158,15 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && bp->stats_coal_ticks) {
- set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
+ bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
- if (bnxt_tc_flower_enabled(bp)) {
- set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (bnxt_tc_flower_enabled(bp))
+ bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
#endif /*CONFIG_RFS_ACCEL*/
if (bp->link_info.phy_retry) {
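Each removed set_bit()/bnxt_queue_sp_work() pair collapses into one call because the helper now takes the event bit as an argument. Its assumed shape (a sketch, not the verbatim helper):

	static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
	{
		set_bit(event, &bp->sp_event);
		/* then schedule bp->sp_task exactly as the old
		 * one-argument version did
		 */
	}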
@@ -11348,44 +14174,45 @@ static void bnxt_timer(struct timer_list *t)
bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
- set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
}
}
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev)) {
- set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
+
+ if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
+ bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
+
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
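The ordering constraint described in the comment comes from the opposing path: close takes the instance lock first and then spins waiting for the service task, roughly (a sketch of the assumed close-side code in __bnxt_close_nic()):

	/* close path, already holding the netdev instance lock */
	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
		msleep(20);

If bnxt_sp_task() tried to take the lock while still flagged IN_SP_TASK, the two sides would deadlock; clearing the bit before locking breaks the cycle.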
@@ -11393,9 +14220,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -11419,7 +14246,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
@@ -11427,19 +14254,28 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
rxr->bnapi->in_reset = false;
bnxt_alloc_one_rx_ring(bp, i);
cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
+}
+
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
}
static void bnxt_fw_reset_close(struct bnxt *bp)
{
- bnxt_ulp_stop(bp);
/* When firmware is in fatal state, quiesce device and disable
* bus master to prevent any potential bad DMAs before freeing
* kernel memory.
@@ -11450,12 +14286,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -11463,9 +14294,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
+ bnxt_free_ctx_mem(bp, false);
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -11488,7 +14317,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -11499,16 +14328,19 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
- if (fw_health->master) {
+ if (fw_health->primary) {
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
wait_dsecs = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
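Replacing the BH spinlock with a seqlock changes the PHC read side from blocking to retrying, and the _irqsave writer variant lets the flag be set safely even with readers in hard-IRQ context. The reader loop looks roughly like this (a sketch of bnxt_refclk_read()'s assumed structure; bnxt_read_phc_cycles() is a hypothetical helper name):

	unsigned int seq;
	u64 ns;

	do {
		seq = read_seqbegin(&ptp->ptp_lock);
		ns = bnxt_read_phc_cycles(bp);
	} while (read_seqretry(&ptp->ptp_lock, seq));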
@@ -11527,9 +14359,10 @@ void bnxt_fw_exception(struct bnxt *bp)
{
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_ulp_stop(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -11558,16 +14391,20 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_ulp_stop(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
+ /* we have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -11578,7 +14415,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -11601,14 +14438,14 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -11621,12 +14458,11 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
u32 val[2];
- if (!cpr2 || cpr2->has_more_work ||
- !bnxt_has_work(bp, cpr2))
+ if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
continue;
if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
@@ -11637,7 +14473,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->sw_stats.cmn.missed_irqs++;
+ cpr->sw_stats->cmn.missed_irqs++;
}
}
}
@@ -11657,16 +14493,9 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
} else {
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
}
- link_info->advertising = link_info->auto_link_speeds;
- link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
+ bnxt_set_auto_speed(link_info);
} else {
- link_info->req_link_speed = link_info->force_link_speed;
- link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
- if (link_info->force_pam4_link_speed) {
- link_info->req_link_speed =
- link_info->force_pam4_link_speed;
- link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
- }
+ bnxt_set_force_speed(link_info);
link_info->req_duplex = link_info->duplex_setting;
}
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -11690,6 +14519,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp)
hwrm_req_send(bp, req);
}
+static void bnxt_ulp_restart(struct bnxt *bp)
+{
+ bnxt_ulp_stop(bp);
+ bnxt_ulp_start(bp, 0);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11701,6 +14536,11 @@ static void bnxt_sp_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
+ bnxt_ulp_restart(bp);
+ bnxt_reenable_sriov(bp);
+ }
+
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -11708,6 +14548,8 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp, 0);
bnxt_hwrm_port_qstats_ext(bp, 0);
@@ -11760,6 +14602,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
bnxt_fw_echo_reply(bp);
+ if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
+ bnxt_hwmon_notify_event(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -11772,56 +14617,83 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
bnxt_rx_ring_reset(bp);
- if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
- bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_devlink_health_fw_report(bp);
+ else
+ bnxt_fw_reset(bp);
+ }
if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
- bnxt_devlink_health_report(bp,
- BNXT_FW_EXCEPTION_SP_EVENT);
+ bnxt_devlink_health_fw_report(bp);
}
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
-/* Under rtnl_lock */
+static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ int *max_cp);
+
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
- int max_rx, max_tx, tx_sets = 1;
- int tx_rings_needed, stats;
+ int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics, rc;
+ int rc;
if (tcs)
tx_sets = tcs;
- rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
- if (rc)
- return rc;
+ _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
- if (max_rx < rx)
+ if (max_rx < rx_rings)
return -ENOMEM;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
- vnics += rx_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx_rings <<= 1;
- cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- stats = cp;
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
+ return -ENOMEM;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
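The scattered tx/rx/cp/stat/vnic locals now travel in a single struct bnxt_hw_rings that bnxt_hwrm_check_rings() consumes directly. Inferred from the field accesses above, it bundles roughly the following (a sketch; the authoritative definition is in bnxt.h):

	struct bnxt_hw_rings {
		int tx;		/* TX rings across all TCs plus XDP */
		int rx;		/* RX rings, doubled when AGG rings are used */
		int grp;	/* ring groups */
		int cp;		/* completion rings / NQs */
		int cp_p5;	/* P5+ hardware completion rings */
		int stat;	/* statistics contexts */
		int vnic;	/* VNICs */
		int rss_ctx;	/* RSS contexts */
	};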
@@ -11852,7 +14724,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
static void bnxt_init_dflt_coal(struct bnxt *bp)
{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
struct bnxt_coal *coal;
+ u16 flags = 0;
+
+ if (coal_cap->cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -11865,6 +14743,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
+ coal->flags = flags;
coal = &bp->tx_coal;
coal->coal_ticks = 28;
@@ -11872,16 +14751,53 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_ticks_irq = 2;
coal->coal_bufs_irq = 2;
coal->bufs_per_record = 1;
+ coal->flags = flags;
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+/* FW that pre-reserves 1 VNIC per function */
+static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
+{
+ u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
+ return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
+ return true;
+ return false;
+}
+
+static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
+ int rc;
+
+ bp->max_pfcwd_tmo_ms = 0;
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
+ if (rc)
+ return;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
+ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+ * so wait before continuing with recovery.
+ */
+ if (rc)
+ msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
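bnxt_hwrm_pfcwd_qcaps() above is a textbook instance of the driver's HWRM request lifecycle. The general pattern (a sketch; HWRM_SOME_CMD and resp->some_cap are placeholders):

	rc = hwrm_req_init(bp, req, HWRM_SOME_CMD);	/* allocate and prepare */
	if (rc)
		return;
	resp = hwrm_req_hold(bp, req);		/* pin the response buffer */
	rc = hwrm_req_send_silent(bp, req);	/* send; suppress error logging */
	if (!rc)
		caps = le16_to_cpu(resp->some_cap);
	hwrm_req_drop(bp, req);			/* release request and response */

hwrm_req_hold() is what keeps resp valid after the send; dropping the request releases both buffers.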
@@ -11932,23 +14848,45 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
+ if (bnxt_fw_pre_resv_vnics(bp))
+ bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+
+ bnxt_hwrm_pfcwd_qcaps(bp);
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
bnxt_ethtool_init(bp);
+ if (bp->fw_cap & BNXT_FW_CAP_PTP)
+ __bnxt_hwrm_ptp_qcfg(bp);
bnxt_dcb_init(bp);
+ bnxt_hwmon_init(bp);
return 0;
}
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
{
- bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
@@ -11963,7 +14901,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
+ if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -11989,7 +14927,7 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
-static int bnxt_fw_init_one(struct bnxt *bp)
+int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -12010,11 +14948,6 @@ static int bnxt_fw_init_one(struct bnxt *bp)
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -12051,6 +14984,27 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
}
}
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ bool result = true; /* firmware will enforce if unknown */
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return result;
+
+ if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
+ return result;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req))
+ result = !!(le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
+ hwrm_req_drop(bp, req);
+ return result;
+}
+
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -12091,12 +15045,10 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
- bnxt_ulp_start(bp, rc);
- bnxt_dl_health_status_update(bp, false);
- }
- bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
+ bnxt_dl_health_fw_status_update(bp, false);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT;
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -12125,17 +15077,17 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
n);
- return;
+ goto ulp_start;
}
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
- return;
+ netdev_unlock(bp->dev);
+ goto ulp_start;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
@@ -12145,7 +15097,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -12159,7 +15111,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
- if (!bp->fw_health->master) {
+ if (!bp->fw_health->primary) {
u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
@@ -12192,6 +15144,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
+ !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
+ bnxt_dl_remote_reload(bp);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
rc = -ENODEV;
@@ -12215,7 +15171,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -12223,8 +15179,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
- return;
+ netdev_unlock(bp->dev);
+ goto ulp_start;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
@@ -12236,14 +15192,19 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_ptp_reapply_pps(bp);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
+ bnxt_dl_health_fw_recovery_done(bp);
+ bnxt_dl_health_fw_status_update(bp, true);
+ }
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- bnxt_ptp_reapply_pps(bp);
- bnxt_dl_health_recovery_done(bp);
- bnxt_dl_health_status_update(bp, true);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -12256,9 +15217,11 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
+ulp_start:
+ bnxt_ulp_start(bp, rc);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -12317,8 +15280,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- pci_enable_pcie_error_reporting(pdev);
-
INIT_WORK(&bp->sp_task, bnxt_sp_task);
INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
@@ -12330,8 +15291,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bnxt_init_dflt_coal(bp);
-
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
@@ -12352,13 +15311,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -12370,6 +15330,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -12378,15 +15339,24 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ /* MTU change may change the AGG ring settings if an XDP multi-buffer
+ * program is attached. We need to set the AGG rings settings and
+ * rx_skb_func accordingly.
+ */
+ if (READ_ONCE(bp->xdp_prog))
+ bnxt_set_rx_skb_mode(bp, true);
+
bnxt_set_ring_params(bp);
if (netif_running(dev))
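dev->mtu is read locklessly on fast paths elsewhere in the stack, so the store is annotated with WRITE_ONCE(); the matching reader pattern is (illustrative):

	unsigned int mtu = READ_ONCE(dev->mtu);	/* safe without the instance lock */

The READ_ONCE(bp->xdp_prog) above pairs the same way with lockless program attach/detach.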
@@ -12399,7 +15369,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- int rc;
+ int rc, tx_cp;
if (tc > bp->max_tc) {
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
@@ -12407,7 +15377,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -12425,13 +15395,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -12481,10 +15454,48 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-#ifdef CONFIG_RFS_ACCEL
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb)
+{
+ struct bnxt_vnic_info *vnic;
+
+ if (skb)
+ return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
+ return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
+}
+
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx)
+{
+ struct hlist_head *head;
+ int bit_id;
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
+ if (bit_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return -ENOMEM;
+ }
+
+ fltr->base.sw_id = (u16)bit_id;
+ fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+ fltr->base.flags |= BNXT_ACT_RING_DST;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return 0;
+}
+
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
@@ -12494,25 +15505,46 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src)) ||
- memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst)))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (keys1->ports.ports == keys2->ports.ports &&
- keys1->control.flags == keys2->control.flags &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
- ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
- return true;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
+}
- return false;
+struct bnxt_ntuple_filter *
+bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx)
+{
+ struct bnxt_ntuple_filter *f;
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(f, head, base.hash) {
+ if (bnxt_fltr_match(f, fltr))
+ return f;
+ }
+ return NULL;
}
+#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
@@ -12520,29 +15552,31 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id, l2_idx = 0;
- struct hlist_head *head;
+ struct bnxt_l2_filter *l2_fltr;
+ int rc = 0, idx;
u32 flags;
- if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- int off = 0, j;
+ if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ } else {
+ struct bnxt_l2_key key;
- netif_addr_lock_bh(dev);
- for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
- if (ether_addr_equal(eth->h_dest,
- vnic->uc_list + off)) {
- l2_idx = j + 1;
- break;
- }
- }
- netif_addr_unlock_bh(dev);
- if (!l2_idx)
+ ether_addr_copy(key.dst_mac_addr, eth->h_dest);
+ key.vlan = 0;
+ l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
+ if (!l2_fltr)
return -EINVAL;
+ if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return -EINVAL;
+ }
}
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
- if (!new_fltr)
+ if (!new_fltr) {
+ bnxt_del_l2_filter(bp, l2_fltr);
return -ENOMEM;
+ }
fkeys = &new_fltr->fkeys;
if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
@@ -12557,10 +15591,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -12568,51 +15605,52 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
+ new_fltr->l2_fltr = l2_fltr;
- memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
- memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
-
- idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- head = &bp->ntp_fltr_hash_tbl[idx];
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (bnxt_fltr_match(fltr, new_fltr)) {
- rcu_read_unlock();
- rc = 0;
- goto err_free;
- }
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_NTP_FLTR_MAX_FLTR, 0);
- if (bit_id < 0) {
- spin_unlock_bh(&bp->ntp_fltr_lock);
- rc = -ENOMEM;
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rc = fltr->base.sw_id;
+ rcu_read_unlock();
goto err_free;
}
+ rcu_read_unlock();
- new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
- new_fltr->l2_fltr_idx = l2_idx;
- new_fltr->rxq = rxq_index;
- hlist_add_head_rcu(&new_fltr->hash, head);
- bp->ntp_fltr_count++;
- spin_unlock_bh(&bp->ntp_fltr_lock);
-
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
-
- return new_fltr->sw_id;
+ new_fltr->base.rxq = rxq_index;
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
+ return new_fltr->base.sw_id;
+ }
err_free:
+ bnxt_del_l2_filter(bp, l2_fltr);
kfree(new_fltr);
return rc;
}
+#endif
+
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
+{
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ kfree_rcu(fltr, base.rcu);
+}
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
+#ifdef CONFIG_RFS_ACCEL
int i;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
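bnxt_del_ntp_filter() also converts the teardown from a blocking synchronize_rcu() to deferred freeing; the old open-coded form is removed from bnxt_cfg_ntp_filters() in the next hunk. Side by side (sketch):

	/* old: block the caller until all RCU readers are done */
	hlist_del_rcu(&fltr->hash);
	synchronize_rcu();
	kfree(fltr);

	/* new: return immediately; free after a grace period */
	hlist_del_rcu(&fltr->base.hash);
	kfree_rcu(fltr, base.rcu);	/* base.rcu must be a struct rcu_head */

Both keep lockless readers of the hash table safe; only the writer's latency changes.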
@@ -12622,13 +15660,15 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
int rc;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bool del = false;
- if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
- if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+ if (fltr->base.flags & BNXT_ACT_NO_AGING)
+ continue;
+ if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
fltr->flow_id,
- fltr->sw_id)) {
+ fltr->base.sw_id)) {
bnxt_hwrm_cfa_ntuple_filter_free(bp,
fltr);
del = true;
@@ -12639,57 +15679,64 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
if (rc)
del = true;
else
- set_bit(BNXT_FLTR_VALID, &fltr->state);
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
}
- if (del) {
- spin_lock_bh(&bp->ntp_fltr_lock);
- hlist_del_rcu(&fltr->hash);
- bp->ntp_fltr_count--;
- spin_unlock_bh(&bp->ntp_fltr_lock);
- synchronize_rcu();
- clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
- kfree(fltr);
- }
+ if (del)
+ bnxt_del_ntp_filter(bp, fltr);
}
}
- if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!\n");
+#endif
}
-#else
-
-static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
{
-}
+ struct bnxt *bp = netdev_priv(netdev);
+ unsigned int cmd;
+
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
+ else
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
-#endif /* CONFIG_RFS_ACCEL */
+ return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
+}
-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
- struct udp_tunnel_info ti;
unsigned int cmd;
- udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+ if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- else
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
-
- if (ti.port)
- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
- .sync_table = bnxt_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+}, bnxt_udp_tunnels_p7 = {
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
},
};
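With .sync_table replaced by per-entry .set_port/.unset_port callbacks, the udp_tunnel core no longer asks the driver to rescan a table; it hands over one entry at a time, roughly (assumed calling pattern, simplified):

	struct udp_tunnel_info ti = {
		.type = UDP_TUNNEL_TYPE_VXLAN,
		.port = htons(4789),
	};

	/* the core picks a free slot in the matching table, then: */
	err = dev->udp_tunnel_nic_info->set_port(dev, table, entry, &ti);

The second _p7 table simply adds a VXLAN-GPE row for chips whose firmware supports that tunnel type.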
@@ -12717,15 +15764,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
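nla_for_each_nested_type() folds the type filter into the iterator, which is why the open-coded nla_type() test disappears. Its assumed shape, per include/net/netlink.h:

	#define nla_for_each_nested_type(pos, type, nla, rem) \
		nla_for_each_nested(pos, nla, rem) \
			if (nla_type(pos) == type)

Note the macro filters only on type; the dropped nla_len() check relies on attribute lengths being validated elsewhere (an assumption worth verifying against the netlink policy in use).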
@@ -12756,13 +15797,6 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
-static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- return &bp->dl_port;
-}
-
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -12794,7 +15828,379 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_devlink_port = bnxt_get_devlink_port,
+ .ndo_hwtstamp_get = bnxt_hwtstamp_get,
+ .ndo_hwtstamp_set = bnxt_hwtstamp_set,
+};
+
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ if (!bp->bnapi)
+ return;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ if (!bp->tx_ring)
+ return;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
+ if (rc < 0)
+ goto err_page_pool_destroy;
+
+ rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ clone->page_pool);
+ if (rc)
+ goto err_rxq_info_unreg;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
+
+ return 0;
+
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_rxq_info_unreg:
+ xdp_rxq_info_unreg(&clone->xdp_rxq);
+err_page_pool_destroy:
+ page_pool_destroy(clone->page_pool);
+ page_pool_destroy(clone->head_pool);
+ clone->page_pool = NULL;
+ clone->head_pool = NULL;
+ return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
+
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
+ int i, rc;
+ u16 mru;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
+ rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
+ rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_reset;
+
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ bnxt_enable_rx_page_pool(rxr);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ mru = bp->dev->mtu + VLAN_ETH_HLEN;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
+ return rc;
+ }
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
+
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable_locked(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
+ return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
+ }
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ rxr = &bp->rx_ring[idx];
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ page_pool_disable_direct_recycling(rxr->page_pool);
+ if (bnxt_separate_head_pool(rxr))
+ page_pool_disable_direct_recycling(rxr->head_pool);
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable_locked(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
};
static void bnxt_remove_one(struct pci_dev *pdev)
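The stat ops registered above feed the netdev netlink qstats interface. The contract, as a sketch of the assumed semantics (not driver code): totals reported to userspace are get_base_stats() plus the sum over currently live queues, which is why the base numbers come from the *_prev counters saved across ring resets:

	struct netdev_queue_stats_rx rx_total, rx_q;
	struct netdev_queue_stats_tx tx_total;
	int q;

	bnxt_get_base_stats(dev, &rx_total, &tx_total);
	for (q = 0; q < dev->real_num_rx_queues; q++) {
		bnxt_get_queue_stats_rx(dev, q, &rx_q);
		rx_total.packets += rx_q.packets;
		rx_total.bytes += rx_q.bytes;
		rx_total.alloc_fail += rx_q.alloc_fail;
	}

Keeping retired-ring counters in the base keeps the totals monotonic across queue count changes.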
@@ -12803,39 +16209,41 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
- bnxt_sriov_disable(bp);
+ __bnxt_sriov_disable(bp);
- if (BNXT_PF(bp))
- devlink_port_type_clear(&bp->dl_port);
+ bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
- pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
+
+ bnxt_rdma_aux_device_uninit(bp);
+
+ bnxt_free_l2_filters(bp, true);
+ bnxt_free_ntp_fltrs(bp, true);
+ WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->fw_reset_task);
bp->sp_event = 0;
- bnxt_dl_fw_reporters_destroy(bp, true);
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
- kfree(bp->edev);
- bp->edev = NULL;
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -12858,6 +16266,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -12902,9 +16314,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
- bnxt_get_ulp_msix_num(bp),
- hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ bnxt_get_ulp_msix_num_in_use(bp),
+ hw_resc->max_stat_ctxs -
+ bnxt_get_ulp_stat_ctxs_in_use(bp));
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -12913,8 +16326,14 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rc;
+
+ rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
/* On P5 chips, max_cp output param should be available NQs */
*max_cp = max_irq;
}
@@ -12987,12 +16406,13 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -13018,21 +16438,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
+
+ avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+ int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
+
+ bnxt_set_ulp_msix_num(bp, ulp_num_msix);
+ bnxt_set_dflt_ulp_stat_ctxs(bp);
+ }
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
/* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -13056,18 +16484,20 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ else
+ netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
@@ -13077,7 +16507,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -13090,7 +16520,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -13186,8 +16616,18 @@ static int bnxt_map_db_bar(struct bnxt *bp)
return 0;
}
+void bnxt_print_device_info(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[bp->board_idx].name,
+ (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
+
+ pcie_print_link_status(bp->pdev);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -13195,6 +16635,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
+ return -ENODEV;
+ }
+
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -13204,25 +16649,29 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
max_irqs = bnxt_get_max_irq(pdev);
- dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
+ max_irqs);
if (!dev)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
- if (bnxt_vf_pciid(ent->driver_data))
+ if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
- if (pdev->msix_cap)
- bp->flags |= BNXT_FLAG_MSIX_CAP;
+ /* No devlink port registration in case of a VF */
+ if (BNXT_PF(bp))
+ SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -13241,10 +16690,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
bnxt_vpd_read_info(bp);
- if (BNXT_CHIP_P5(bp)) {
- bp->flags |= BNXT_FLAG_CHIP_P5;
- if (BNXT_CHIP_SR2(bp))
- bp->flags |= BNXT_FLAG_CHIP_SR2;
+ if (BNXT_CHIP_P5_PLUS(bp)) {
+ bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
+ if (BNXT_CHIP_P7(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp);
@@ -13269,6 +16718,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_features |= NETIF_F_GSO_UDP_L4;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
@@ -13279,7 +16730,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
- dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
+ else
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
@@ -13295,15 +16751,21 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features &= ~NETIF_F_LRO;
dev->priv_flags |= IFF_UNICAST_FLT;
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
- else if (BNXT_CHIP_P5(bp))
+ else if (BNXT_CHIP_P5_PLUS(bp))
bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
@@ -13329,18 +16791,33 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- bnxt_set_rx_skb_mode(bp, false);
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
+ bnxt_init_l2_fltr_tbl(bp);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
+ bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
- rc = -ENOMEM;
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ } else {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ }
goto init_err_pci_clean;
}
bnxt_fw_init_one_p3(bp);
+ bnxt_init_dflt_coal(bp);
+
if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
@@ -13374,23 +16851,30 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
+ dev->netmem_tx = true;
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
- if (BNXT_PF(bp))
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
- netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
- board_info[ent->driver_data].name,
- (long)pci_resource_start(pdev, 0), dev->dev_addr);
- pcie_print_link_status(pdev);
+ bnxt_rdma_aux_device_add(bp);
+
+ bnxt_print_device_info(bp);
pci_save_state(pdev);
- return 0;
+ return 0;
init_err_cleanup:
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);
@@ -13399,6 +16883,7 @@ init_err_dl:
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
@@ -13406,9 +16891,8 @@ init_err_pci_clean:
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -13426,14 +16910,19 @@ static void bnxt_shutdown(struct pci_dev *pdev)
return;
rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
- bnxt_ulp_shutdown(bp);
+ if (bnxt_hwrm_func_drv_unrgtr(bp)) {
+ pcie_flr(pdev);
+ goto shutdown_exit;
+ }
+ bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
@@ -13443,6 +16932,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
+ netdev_unlock(dev);
rtnl_unlock();
}
@@ -13453,18 +16943,18 @@ static int bnxt_suspend(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
bnxt_ulp_stop(bp);
+
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(dev);
return rc;
}
@@ -13474,7 +16964,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -13496,11 +16986,19 @@ static int bnxt_resume(struct device *device)
if (rc)
goto resume_exit;
+ bnxt_clear_reservations(bp, true);
+
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
goto resume_exit;
}
+ if (bp->fw_crash_mem)
+ bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (bnxt_ptp_init(bp)) {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -13509,10 +17007,10 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
- rtnl_unlock();
return rc;
}
@@ -13538,33 +17036,43 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
- rtnl_lock();
+ bnxt_ulp_stop(bp);
+
+ netdev_lock(netdev);
netif_device_detach(netdev);
- bnxt_ulp_stop(bp);
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
- if (state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ if (abort || state == pci_channel_io_perm_failure) {
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen,
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
- rtnl_unlock();
+ bnxt_free_ctx_mem(bp, false);
+ netdev_unlock(netdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -13573,7 +17081,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -13582,11 +17090,17 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0, off;
+ int retry = 0;
+ int err = 0;
+ int off;
netdev_info(bp->dev, "PCI Slot Reset\n");
- rtnl_lock();
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
+ msleep(900);
+
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -13595,7 +17109,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Upon fatal error, our device internal logic that latches to
* BAR value is getting reset and will restore only upon
- * rewritting the BARs.
+ * rewriting the BARs.
*
* As pci_restore_state() does not re-write the BARs if the
* value is same as saved value earlier, driver needs to
@@ -13610,12 +17124,37 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ bnxt_inv_fw_health_reg(bp);
+ bnxt_try_map_fw_health_reg(bp);
+
+ /* In some PCIe AER scenarios, firmware may take up to
+ * 10 seconds to become ready in the worst case.
+ */
+ do {
+ err = bnxt_try_recover_fw(bp);
+ if (!err)
+ break;
+ retry++;
+ } while (retry < BNXT_FW_SLOT_RESET_RETRY);
+
+ if (err) {
+ dev_err(&pdev->dev, "Firmware not ready\n");
+ goto reset_exit;
+ }
+
err = bnxt_hwrm_func_reset(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+
+ /* IRQ will be initialized later in bnxt_io_resume */
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
}
- rtnl_unlock();
+reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_clear_reservations(bp, true);
+ netdev_unlock(netdev);
return result;
}
@@ -13634,19 +17173,26 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
-
- bnxt_ulp_start(bp, err);
if (!err) {
- bnxt_reenable_sriov(bp);
- netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ err = bnxt_open(netdev);
+ } else {
+ err = bnxt_reserve_rings(bp, true);
+ if (!err)
+ err = bnxt_init_int_mode(bp);
+ }
}
- rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+
+ netdev_unlock(netdev);
+ bnxt_ulp_start(bp, err);
+ if (!err)
+ bnxt_reenable_sriov(bp);
}
static const struct pci_error_handlers bnxt_err_handler = {
@@ -13670,8 +17216,16 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ int err;
+
bnxt_debug_init();
- return pci_register_driver(&bnxt_pci_driver);
+ err = pci_register_driver(&bnxt_pci_driver);
+ if (err) {
+ bnxt_debug_exit();
+ return err;
+ }
+
+ return 0;
}
static void __exit bnxt_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 19fe6478e9b4..f5f07a7e6b29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,12 +18,13 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 2
+#define DRV_VER_UPD 3
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
@@ -33,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -60,6 +64,30 @@ struct tx_bd {
__le64 tx_bd_haddr;
} __packed;
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bp, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bp)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bp)->tx_ring_mask)
+
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
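
Aside for reviewers: the opaque word defined above packs the NAPI ring index,
the BD count and the masked producer index into one 32-bit value. A standalone
userspace sketch of the round trip, assuming a 4096-entry TX ring (mask 0xfff):

#include <assert.h>
#include <stdint.h>

#define TX_OPAQUE_IDX_MASK	0x0000ffff
#define TX_OPAQUE_BDS_MASK	0x00ff0000
#define TX_OPAQUE_BDS_SHIFT	16
#define TX_OPAQUE_RING_MASK	0xff000000
#define TX_OPAQUE_RING_SHIFT	24

int main(void)
{
	uint32_t ring_mask = 0xfff;	/* assumed ring size 4096 */
	uint32_t napi_idx = 3, bds = 5, prod = 0x1ffe;
	uint32_t opq = (napi_idx << TX_OPAQUE_RING_SHIFT) |
		       (bds << TX_OPAQUE_BDS_SHIFT) | (prod & ring_mask);

	/* each field extracts back out of the packed word */
	assert((opq & TX_OPAQUE_IDX_MASK) == (prod & ring_mask));
	assert(((opq & TX_OPAQUE_BDS_MASK) >> TX_OPAQUE_BDS_SHIFT) == bds);
	assert(((opq & TX_OPAQUE_RING_MASK) >> TX_OPAQUE_RING_SHIFT) == napi_idx);
	return 0;
}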
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -103,6 +131,7 @@ struct rx_bd {
#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
#define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_AGG_EOP (1 << 6)
#define RX_BD_FLAGS_EOP (1 << 7)
#define RX_BD_FLAGS_BUFFERS (3 << 8)
#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
@@ -120,11 +149,15 @@ struct tx_cmp {
__le32 tx_cmp_flags_type;
#define CMP_TYPE (0x3f << 0)
#define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
#define CMP_TYPE_RX_L2_CMP 17
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
#define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -151,16 +184,46 @@ struct tx_cmp {
#define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
#define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
- __le32 tx_cmp_unsed_3;
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
};
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
+struct tx_ts_cmp {
+ __le32 tx_ts_cmp_flags_type;
+ #define TX_TS_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
+ #define TX_TS_CMP_TS_SUB_NS (0xf << 12)
+ #define TX_TS_CMP_TS_NS_MID (0xffff << 16)
+ #define TX_TS_CMP_TS_NS_MID_SFT 16
+ u32 tx_ts_cmp_opaque;
+ __le32 tx_ts_cmp_errors_v;
+ #define TX_TS_CMP_V (1 << 0)
+ #define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
+ __le32 tx_ts_cmp_ts_ns_lo;
+};
+
+#define BNXT_GET_TX_TS_48B_NS(tscmp) \
+ (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
+ ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
+ TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
+
+#define BNXT_TX_TS_ERR(tscmp) \
+ (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+ ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
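Aside: BNXT_GET_TX_TS_48B_NS above rebuilds a 48-bit nanosecond timestamp from
two pieces of the completion, the low 32 bits in tx_ts_cmp_ts_ns_lo and the
middle 16 bits in flags_type[31:16]. A sketch of that reconstruction with a
made-up timestamp value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t ts = 0x0000beefcafef00dULL;	/* sample 48-bit value */
	uint32_t flags_type = (uint32_t)((ts >> 32) & 0xffff) << 16;
	uint32_t ts_ns_lo = (uint32_t)ts;
	/* same arithmetic as BNXT_GET_TX_TS_48B_NS: lo32 | mid16 << 32 */
	uint64_t out = ts_ns_lo |
		       ((uint64_t)(flags_type & (0xffffu << 16)) << 16);

	assert(out == ts);
	return 0;
}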
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
#define RX_CMP_FLAGS_ERROR (1 << 6)
#define RX_CMP_FLAGS_PLACEMENT (7 << 7)
#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
- #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11)
#define RX_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_CMP_FLAGS_ITYPES_MASK 0xf000
#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
@@ -181,12 +244,30 @@ struct rx_cmp {
#define RX_CMP_AGG_BUFS_SHIFT 1
#define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY (0xf << 12)
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+ #define RX_CMP_V3_RSS_EXT_OP_NEW (0xf << 8)
+ #define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
#define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_CMP_SUB_NS_TS (0xf << 16)
+ #define RX_CMP_SUB_NS_TS_SHIFT 16
+ #define RX_CMP_METADATA1 (0xf << 28)
+ #define RX_CMP_METADATA1_SHIFT 28
+ #define RX_CMP_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_CMP_METADATA1_VALID (0x8 << 28)
__le32 rx_cmp_rss_hash;
};
+#define BNXT_PTP_RX_TS_VALID(flags) \
+ (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS)
+
+#define BNXT_ALL_RX_TS_VALID(flags) \
+ !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT)
+
#define RX_CMP_HASH_VALID(rxcmp) \
((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
@@ -196,6 +277,33 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
+ RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bp, rxcmp) \
+ (((bp)->rss_cap & BNXT_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
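Aside: RX_CMP_V3_HASH_TYPE above reads the RSS ext-op field from one of two
bit positions in rx_cmp_misc_v1, depending on whether the device reports the
TCAM-based RSS capability. A small sketch of that selection, with an assumed
misc_v1 word carrying EXT_OP_OUTFL_3 in the legacy field and EXT_OP_OUTER_4 in
the new field:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t misc_v1 = (0xau << 12) | (0x2u << 8);
	int rss_tcam = 1;		/* assume BNXT_RSS_CAP_RSS_TCAM set */
	uint32_t ext_op = rss_tcam ? (misc_v1 & (0xfu << 8)) >> 8 :
				     (misc_v1 & (0xfu << 12)) >> 12;

	assert(ext_op == 0x2);		/* EXT_OP_OUTER_4 wins on TCAM parts */
	return 0;
}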
struct rx_cmp_ext {
__le32 rx_cmp_flags2;
#define RX_CMP_FLAGS2_IP_CS_CALC 0x1
@@ -243,6 +351,9 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
+ #define RX_CMPL_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_CMPL_METADATA0_SFT 16
__le32 rx_cmp_timestamp;
};
@@ -268,6 +379,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
@@ -276,7 +391,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
- #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
@@ -310,10 +425,18 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V1 (0x1 << 0)
#define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE (0x1ff << 7)
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
+ #define RX_TPA_START_CMP_METADATA1 (0xf << 28)
+ #define RX_TPA_START_CMP_METADATA1_SHIFT 28
+ #define RX_TPA_START_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_TPA_START_METADATA1_VALID (0x8 << 28)
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -327,6 +450,11 @@ struct rx_tpa_start_cmp {
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
#define TPA_START_AGG_ID(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
@@ -339,6 +467,14 @@ struct rx_tpa_start_cmp {
((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -349,6 +485,8 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE (0x1 << 10)
+ #define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
@@ -362,6 +500,9 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ #define RX_TPA_START_CMP_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_TPA_START_CMP_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_TPA_START_CMP_METADATA0_SFT 16
__le32 rx_tpa_start_cmp_hdr_info;
};
@@ -378,6 +519,11 @@ struct rx_tpa_start_cmp_ext {
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -407,7 +553,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
@@ -489,6 +635,15 @@ struct rx_tpa_end_cmp_ext {
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
@@ -513,6 +668,8 @@ struct nqe_cn {
#define NQ_CN_TYPE_SFT 0
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
__le16 reserved16;
__le32 cq_handle_low;
__le32 v;
@@ -520,6 +677,23 @@ struct nqe_cn {
__le32 cq_handle_high;
};
+#define BNXT_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNXT_NQ_HDL_TYPE_MASK 0xff000000
+#define BNXT_NQ_HDL_TYPE_SHIFT 24
+#define BNXT_NQ_HDL_TYPE_RX 0x00
+#define BNXT_NQ_HDL_TYPE_TX 0x01
+
+#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK)
+#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \
+ BNXT_NQ_HDL_TYPE_SHIFT)
+
+#define BNXT_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
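Aside: the NQ handle macros above tag each completion ring handle with a type
byte in bits [31:24] and keep the ring index in the low 24 bits, mirroring
what BNXT_SET_NQ_HDL stores. A sketch of the encode/decode pair:

#include <assert.h>
#include <stdint.h>

#define BNXT_NQ_HDL_IDX_MASK	0x00ffffff
#define BNXT_NQ_HDL_TYPE_MASK	0xff000000
#define BNXT_NQ_HDL_TYPE_SHIFT	24
#define BNXT_NQ_HDL_TYPE_TX	0x01

int main(void)
{
	uint32_t hdl = (BNXT_NQ_HDL_TYPE_TX << BNXT_NQ_HDL_TYPE_SHIFT) | 7;

	assert((hdl & BNXT_NQ_HDL_IDX_MASK) == 7);
	assert(((hdl & BNXT_NQ_HDL_TYPE_MASK) >> BNXT_NQ_HDL_TYPE_SHIFT) ==
	       BNXT_NQ_HDL_TYPE_TX);
	return 0;
}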
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -535,9 +709,14 @@ struct nqe_cn {
/* 64-bit doorbell */
#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_EPOCH_MASK 0x01000000UL
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_MASK 0x06000000UL
+#define DBR_TOGGLE_SFT 25
#define DBR_XID_MASK 0x000fffff00000000ULL
#define DBR_XID_SFT 32
#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
#define DBR_TYPE_SQ (0x0ULL << 60)
#define DBR_TYPE_RQ (0x1ULL << 60)
#define DBR_TYPE_SRQ (0x2ULL << 60)
@@ -550,6 +729,7 @@ struct nqe_cn {
#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
#define DB_PF_OFFSET_P5 0x10000
@@ -582,9 +762,20 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +-------------------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM |     bp->rx_buf_use_size     |   skb_shared_info   |
+ * | (bp->rx_dma_offset) |                             |                     |
+ * +-------------------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ (BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
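
Aside: with the shared-info tail reserved, the page-mode MTU budget shrinks
accordingly. A back-of-the-envelope sketch; every constant below is an
assumption for a typical 4K-page x86-64 build, not a value taken from the
patch:

#include <stdio.h>

int main(void)
{
	unsigned int page = 4096, headroom = 256;	/* XDP_PACKET_HEADROOM */
	unsigned int vlan_eth = 18, ip_align = 0;	/* VLAN_ETH_HLEN, NET_IP_ALIGN */
	unsigned int shinfo = 320;	/* SKB_DATA_ALIGN(sizeof(skb_shared_info)) */
	unsigned int sbuf = page - vlan_eth - ip_align - headroom;

	printf("MTU_SBUF=%u MTU=%u\n", sbuf, sbuf - shinfo);	/* 3822 / 3502 */
	return 0;
}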
#define BNXT_MIN_PKT_SIZE 52
@@ -634,10 +825,12 @@ struct nqe_cn {
*/
#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
-#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_RING(bp, x) (((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bp, x) (((x) & (bp)->rx_agg_ring_mask) >> \
+ (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
-#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -664,11 +857,14 @@ struct nqe_cn {
#define RX_CMP_TYPE(rxcmp) \
(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
-#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+#define RING_RX(bp, idx) ((idx) & (bp)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
-#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+#define RING_RX_AGG(bp, idx) ((idx) & (bp)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
-#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
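
Aside: the ring index macros now keep producer/consumer counters unbounded and
mask only at array-access time (RING_RX/RING_TX), which is what lets the
doorbell code derive an epoch bit from the raw counter. A sketch of the new
behaviour with an assumed 4096-entry RX ring:

#include <assert.h>
#include <stdint.h>

#define RING_RX(mask, idx)	((idx) & (mask))	/* stand-in for the bp-based macro */
#define NEXT_RX(idx)		((idx) + 1)

int main(void)
{
	uint32_t mask = 0xfff;
	uint32_t prod = 0xffe;

	prod = NEXT_RX(prod);			/* 0xfff: last slot */
	prod = NEXT_RX(prod);			/* 0x1000: counter keeps running */
	assert(RING_RX(mask, prod) == 0);	/* but wraps when indexing */
	return 0;
}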
@@ -681,6 +877,7 @@ struct nqe_cn {
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
#define BNXT_REDIRECT_EVENT 8
+#define BNXT_TX_CMP_EVENT 0x10
struct bnxt_sw_tx_bd {
union {
@@ -689,12 +886,14 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
- u8 is_gso;
+ struct page *page;
+ u8 is_ts_pkt;
u8 is_push;
u8 action;
+ unsigned short nr_frags;
union {
- unsigned short nr_frags;
u16 rx_prod;
+ u16 txts_prod;
};
};
@@ -705,18 +904,11 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
-struct bnxt_mem_init {
- u8 init_val;
- u16 offset;
-#define BNXT_MEM_INVALID_OFFSET 0xffff
- u16 size;
-};
-
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -726,7 +918,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- struct bnxt_mem_init *mem_init;
+ struct bnxt_ctx_mem_type *ctx_mem;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -768,13 +960,27 @@ struct bnxt_db_info {
u64 db_key64;
u32 db_key32;
};
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
};
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
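Aside: DB_RING_IDX above folds the wrap ("epoch") bit of the unbounded index
into bit 24 of the doorbell value. A sketch with an assumed 4096-entry ring,
where db_epoch_mask would be ring_mask + 1 and db_epoch_shift would be
DBR_EPOCH_SFT - log2(4096):

#include <assert.h>
#include <stdint.h>

#define DBR_EPOCH_SFT	24

int main(void)
{
	uint32_t ring_mask = 0xfff;
	uint32_t epoch_mask = ring_mask + 1;	/* bit 12 of the counter */
	uint32_t epoch_shift = DBR_EPOCH_SFT - 12;
	uint32_t idx = 0x1005;			/* producer wrapped once */
	uint32_t db_idx = (idx & ring_mask) |
			  ((idx & epoch_mask) << epoch_shift);

	assert((db_idx & ring_mask) == 0x005);
	assert(db_idx & (1u << DBR_EPOCH_SFT));	/* epoch bit toggled */
	return 0;
}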
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *tx_cpr;
u16 tx_prod;
u16 tx_cons;
+ u16 tx_hw_cons;
u16 txq_index;
+ u8 tx_napi_idx;
u8 kick_pending;
struct bnxt_db_info tx_db;
@@ -791,6 +997,8 @@ struct bnxt_tx_ring_info {
u32 dev_state;
struct bnxt_ring_struct tx_ring_struct;
+ /* Synchronize simultaneous xdp_xmit on same ring */
+ spinlock_t xdp_tx_lock;
};
#define BNXT_LEGACY_COAL_CMPL_PARAMS \
@@ -838,6 +1046,7 @@ struct bnxt_coal {
u16 idle_thresh;
u8 bufs_per_record;
u8 budget;
+ u16 flags;
};
struct bnxt_tpa_info {
@@ -866,6 +1075,8 @@ struct bnxt_tpa_info {
u16 cfa_code; /* cfa_code in TPA start compl */
u8 agg_count;
+ u8 vlan_valid:1;
+ u8 cfa_code_valid:1;
struct rx_agg_cmp *agg_arr;
};
@@ -878,6 +1089,7 @@ struct bnxt_tpa_idx_map {
struct bnxt_rx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *rx_cpr;
u16 rx_prod;
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
@@ -895,9 +1107,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
-
- struct page *rx_page;
- unsigned int rx_page_offset;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -909,6 +1119,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+ struct page_pool *head_pool;
};
struct bnxt_rx_sw_stats {
@@ -919,15 +1130,32 @@ struct bnxt_rx_sw_stats {
u64 rx_netpoll_discards;
};
+struct bnxt_tx_sw_stats {
+ u64 tx_resets;
+};
+
struct bnxt_cmn_sw_stats {
u64 missed_irqs;
};
struct bnxt_sw_stats {
struct bnxt_rx_sw_stats rx;
+ struct bnxt_tx_sw_stats tx;
struct bnxt_cmn_sw_stats cmn;
};
+struct bnxt_total_ring_err_stats {
+ u64 rx_total_l4_csum_errors;
+ u64 rx_total_resets;
+ u64 rx_total_buf_errors;
+ u64 rx_total_oom_discards;
+ u64 rx_total_netpoll_discards;
+ u64 rx_total_ring_discards;
+ u64 tx_total_resets;
+ u64 tx_total_ring_discards;
+ u64 total_missed_irqs;
+};
+
struct bnxt_stats_mem {
u64 *sw_stats;
u64 *hw_masks;
@@ -943,6 +1171,11 @@ struct bnxt_cp_ring_info {
u8 had_work_done:1;
u8 has_more_work:1;
+ u8 had_nqe_notify:1;
+ u8 toggle;
+
+ u8 cp_ring_type;
+ u8 cp_idx;
u32 last_cp_raw_cons;
@@ -963,15 +1196,22 @@ struct bnxt_cp_ring_info {
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- struct bnxt_sw_stats sw_stats;
+ struct bnxt_sw_stats *sw_stats;
struct bnxt_ring_struct cp_ring_struct;
- struct bnxt_cp_ring_info *cp_ring_arr[2];
-#define BNXT_RX_HDL 0
-#define BNXT_TX_HDL 1
+ int cp_ring_count;
+ struct bnxt_cp_ring_info *cp_ring_arr;
};
+#define BNXT_MAX_QUEUE 8
+#define BNXT_MAX_TXR_PER_NAPI BNXT_MAX_QUEUE
+
+#define bnxt_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
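Aside: bnxt_for_each_napi_tx walks the tx_ring[] array until the first NULL
slot, visiting at most BNXT_MAX_TXR_PER_NAPI rings. A self-contained sketch
with mock types (the struct names here are stand-ins) showing the iteration:

#include <assert.h>
#include <stddef.h>

#define BNXT_MAX_TXR_PER_NAPI	8

struct mock_txr { int id; };
struct mock_napi { struct mock_txr *tx_ring[BNXT_MAX_TXR_PER_NAPI]; };

#define bnxt_for_each_napi_tx(iter, bnapi, txr)		\
	for (iter = 0, txr = (bnapi)->tx_ring[0]; txr;		\
	     txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ?		\
		(bnapi)->tx_ring[++iter] : NULL)

int main(void)
{
	struct mock_txr a = { 0 }, b = { 1 };
	struct mock_napi n = { .tx_ring = { &a, &b } };	/* rest stay NULL */
	struct mock_txr *txr;
	int iter, seen = 0;

	bnxt_for_each_napi_tx(iter, &n, txr)
		seen++;
	assert(seen == 2);	/* stops at the first NULL slot */
	return 0;
}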
struct bnxt_napi {
struct napi_struct napi;
struct bnxt *bp;
@@ -979,12 +1219,12 @@ struct bnxt_napi {
int index;
struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info *rx_ring;
- struct bnxt_tx_ring_info *tx_ring;
+ struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI];
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
- int);
- int tx_pkts;
+ int budget);
u8 events;
+ u8 tx_fault:1;
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -992,13 +1232,21 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -1017,13 +1265,17 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
- __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
u16 uc_filter_count;
u8 *uc_list;
@@ -1056,11 +1308,35 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+#define BNXT_VNIC_RSSCTX_FLAG 0x40
+ struct ethtool_rxfh_context *rss_ctx;
+ u32 vnic_id;
+};
+
+struct bnxt_rss_ctx {
+ struct bnxt_vnic_info vnic;
+ u8 index;
+};
+
+#define BNXT_MAX_ETH_RSS_CTX 32
+#define BNXT_VNIC_ID_INVALID 0xffffffff
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1085,6 +1361,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1097,7 +1379,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
-#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1119,12 +1400,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1136,19 +1411,127 @@ struct bnxt_pf_info {
struct bnxt_vf_info *vf;
};
-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
struct hlist_node hash;
- u8 dst_mac_addr[ETH_ALEN];
- u8 src_mac_addr[ETH_ALEN];
- struct flow_keys fkeys;
+ struct list_head list;
__le64 filter_id;
+ u8 type;
+#define BNXT_FLTR_TYPE_NTUPLE 1
+#define BNXT_FLTR_TYPE_L2 2
+ u8 flags;
+#define BNXT_ACT_DROP 1
+#define BNXT_ACT_RING_DST 2
+#define BNXT_ACT_FUNC_DST 4
+#define BNXT_ACT_NO_AGING 8
+#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
- u8 l2_fltr_idx;
u16 rxq;
- u32 flow_id;
+ u16 fw_vnic_id;
+ u16 vf_idx;
unsigned long state;
#define BNXT_FLTR_VALID 0
-#define BNXT_FLTR_UPDATE 1
+#define BNXT_FLTR_INSERTED 1
+#define BNXT_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
+struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
+ struct bnxt_filter_base base;
+ struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 flow_id;
+};
+
+struct bnxt_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+struct bnxt_ipv4_tuple {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+struct bnxt_ipv6_tuple {
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
+
+struct bnxt_l2_filter {
+ /* base filter must be the first member */
+ struct bnxt_filter_base base;
+ struct bnxt_l2_key l2_key;
+ atomic_t refcnt;
+};
+
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
};
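
Aside: the comment above pins this compat response to exactly 96 bytes. A toy
illustration of enforcing such a wire-format contract at build time; the
struct here is a stand-in, not the real layout:

#include <stdint.h>

struct compat_resp {
	uint8_t payload[95];	/* bytes shared with the full response */
	uint8_t valid;		/* last byte differs in the compat version */
};

_Static_assert(sizeof(struct compat_resp) == 96,
	       "compat response must stay exactly 96 bytes");

int main(void)
{
	return 0;
}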
struct bnxt_link_info {
@@ -1165,7 +1548,12 @@ struct bnxt_link_info {
#define BNXT_PHY_STATE_ENABLED 0
#define BNXT_PHY_STATE_DISABLED 1
- u8 link_up;
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1199,8 +1587,12 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
+#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+#define BNXT_LINK_SPEED_400GB PORT_PHY_QCFG_RESP_LINK_SPEED_400GB
u16 support_speeds;
u16 support_pam4_speeds;
+ u16 support_speeds2;
+
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1216,12 +1608,52 @@ struct bnxt_link_info {
#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
+ u16 auto_link_speeds2;
+#define BNXT_LINK_SPEEDS2_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB
+#define BNXT_LINK_SPEEDS2_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB
+#define BNXT_LINK_SPEEDS2_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB
+#define BNXT_LINK_SPEEDS2_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB
+#define BNXT_LINK_SPEEDS2_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112
+
u16 support_auto_speeds;
u16 support_pam4_auto_speeds;
+ u16 support_auto_speeds2;
+
u16 lp_auto_link_speeds;
u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
u16 force_pam4_link_speed;
+ u16 force_link_speed2;
+#define BNXT_LINK_SPEED_50GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEED_200GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEED_400GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEED_200GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEED_400GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+
u32 preemphasis;
u8 module_status;
u8 active_fec_sig_mode;
@@ -1252,6 +1684,8 @@ struct bnxt_link_info {
u8 req_signal_mode;
#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_PAM4_112 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1)
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
@@ -1311,8 +1745,6 @@ struct bnxt_link_info {
(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
BNXT_FEC_RS_OFF(link_info))
-#define BNXT_MAX_QUEUE 8
-
struct bnxt_queue_info {
u8 queue_id;
u8 queue_profile;
@@ -1341,13 +1773,11 @@ struct bnxt_test_info {
};
#define CHIMP_REG_VIEW_ADDR \
- ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+ ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 0x80000000 : 0xb1000000)
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -1438,6 +1868,8 @@ struct bnxt_vf_rep {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+#define MAX_CTX_BYTES ((size_t)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE)
+#define MAX_CTX_BYTES_MASK (MAX_CTX_BYTES - 1)
struct bnxt_ctx_pg_info {
u32 entries;
@@ -1465,53 +1897,107 @@ do { \
attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
} while (0)
+struct bnxt_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+#define BNXT_CTX_MEM_FW_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE
+#define BNXT_CTX_MEM_FW_BIN_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE
+#define BNXT_CTX_MEM_PERSIST \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET
+
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 mem_valid:1;
+ u8 split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNXT_CTX_QP FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
+#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+#define BNXT_CTX_SRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNXT_CTX_SRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNXT_CTX_CRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNXT_CTX_CRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNXT_CTX_RIGP0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNXT_CTX_L2HWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNXT_CTX_REHWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_CA0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE
+#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
+#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
+#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
+#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE
+#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE
+
+#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
+#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1)
+#define BNXT_CTX_INV ((u16)-1)
+
struct bnxt_ctx_mem_info {
- u32 qp_max_entries;
- u16 qp_min_qp1_entries;
- u16 qp_max_l2_entries;
- u16 qp_entry_size;
- u16 srq_max_l2_entries;
- u32 srq_max_entries;
- u16 srq_entry_size;
- u16 cq_max_l2_entries;
- u32 cq_max_entries;
- u16 cq_entry_size;
- u16 vnic_max_vnic_entries;
- u16 vnic_max_ring_table_entries;
- u16 vnic_entry_size;
- u32 stat_max_entries;
- u16 stat_entry_size;
- u16 tqm_entry_size;
- u32 tqm_min_entries_per_ring;
- u32 tqm_max_entries_per_ring;
- u32 mrav_max_entries;
- u16 mrav_entry_size;
- u16 tim_entry_size;
- u32 tim_max_entries;
- u16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
+ struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX];
+};
+
+enum bnxt_health_severity {
+ SEVERITY_NORMAL = 0,
+ SEVERITY_WARNING,
+ SEVERITY_RECOVERABLE,
+ SEVERITY_FATAL,
+};
- struct bnxt_ctx_pg_info qp_mem;
- struct bnxt_ctx_pg_info srq_mem;
- struct bnxt_ctx_pg_info cq_mem;
- struct bnxt_ctx_pg_info vnic_mem;
- struct bnxt_ctx_pg_info stat_mem;
- struct bnxt_ctx_pg_info mrav_mem;
- struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP 0
-#define BNXT_CTX_MEM_INIT_SRQ 1
-#define BNXT_CTX_MEM_INIT_CQ 2
-#define BNXT_CTX_MEM_INIT_VNIC 3
-#define BNXT_CTX_MEM_INIT_STAT 4
-#define BNXT_CTX_MEM_INIT_MRAV 5
-#define BNXT_CTX_MEM_INIT_MAX 6
- struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
+enum bnxt_health_remedy {
+ REMEDY_DEVLINK_RECOVER,
+ REMEDY_POWER_CYCLE_DEVICE,
+ REMEDY_POWER_CYCLE_HOST,
+ REMEDY_FW_UPDATE,
+ REMEDY_HW_REPLACE,
};
struct bnxt_fw_health {
@@ -1531,9 +2017,9 @@ struct bnxt_fw_health {
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
- u8 master:1;
- u8 fatal:1;
+ u8 primary:1;
u8 status_reliable:1;
+ u8 resets_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1543,12 +2029,15 @@ struct bnxt_fw_health {
u32 echo_req_data1;
u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
- struct devlink_health_reporter *fw_reset_reporter;
- struct devlink_health_reporter *fw_fatal_reporter;
-};
-
-struct bnxt_fw_reporter_ctx {
- unsigned long sp_event;
+ /* Protects severity and remedy */
+ struct mutex lock;
+ enum bnxt_health_severity severity;
+ enum bnxt_health_remedy remedy;
+ u32 arrests;
+ u32 discoveries;
+ u32 survivals;
+ u32 fatalities;
+ u32 diagnoses;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
@@ -1585,6 +2074,87 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
+
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
+enum board_idx {
+ BCM57301,
+ BCM57302,
+ BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
+ BCM57402,
+ BCM57404,
+ BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
+ BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57407_NPAR,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
+ BCM5745x_NPAR,
+ BCM57508,
+ BCM57504,
+ BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
+ BCM57608,
+ BCM57604,
+ BCM57602,
+ BCM57601,
+ BCM58802,
+ BCM58804,
+ BCM58808,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
+ NETXTREME_S_VF,
+ NETXTREME_C_VF_HV,
+ NETXTREME_E_VF_HV,
+ NETXTREME_E_P5_VF,
+ NETXTREME_E_P5_VF_HV,
+ NETXTREME_E_P7_VF,
+ NETXTREME_E_P7_VF_HV,
+};
+
+#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+#define BNXT_TRACE_MAX 11
+
+struct bnxt_bs_trace_info {
+ u8 *magic_byte;
+ u32 last_offset;
+ u8 wrapped:1;
+ u16 ctx_type;
+ u16 trace_type;
+};
+
+static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
+ u32 offset)
+{
+ if (!bs_trace->wrapped && bs_trace->magic_byte &&
+ *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
+ bs_trace->wrapped = 1;
+ bs_trace->last_offset = offset;
+}
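
Aside: the wrap check relies on a sentinel byte planted in the trace buffer;
once firmware overwrites it, the buffer must have wrapped. A minimal sketch of
the sentinel idea:

#include <assert.h>
#include <stdint.h>

#define MAGIC	((uint8_t)0xbc)		/* BNXT_TRACE_BUF_MAGIC_BYTE */

int main(void)
{
	uint8_t buf[16] = { [5] = MAGIC };	/* sentinel at next offset */
	uint8_t wrapped = 0;

	buf[5] = 0x00;		/* "firmware" overwrote the sentinel */
	if (buf[5] != MAGIC)
		wrapped = 1;
	assert(wrapped);
	return 0;
}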
struct bnxt {
void __iomem *bar0;
@@ -1621,14 +2191,14 @@ struct bnxt {
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_57608 0x1760
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
u8 chip_rev;
-#define CHIP_NUM_58818 0xd818
-
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1678,7 +2248,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_CHIP_P5 0x1
+ #define BNXT_FLAG_CHIP_P5_PLUS 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1690,15 +2260,9 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
- #define BNXT_FLAG_USING_MSIX 0x40
- #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
- #define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
@@ -1706,14 +2270,19 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_CHIP_SR2 0x80000
+ #define BNXT_FLAG_CHIP_P7 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_UDP_GSO_CAP 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
+ #define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1721,6 +2290,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -1732,20 +2306,21 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ||\
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
-#define BNXT_CHIP_SR2(bp) \
- ((bp)->chip_num == CHIP_NUM_58818)
+#define BNXT_CHIP_P7(bp) \
+ ((bp)->chip_num == CHIP_NUM_57608)
-#define BNXT_CHIP_P5_THOR(bp) \
+#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
- (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+#define BNXT_CHIP_P5_PLUS(bp) \
+ (BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp))
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1755,9 +2330,18 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp) \
+ (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
+ BNXT_CHIP_TYPE_NITRO_A0(bp))
+
#define BNXT_CHIP_P4_PLUS(bp) \
- (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
+#define BNXT_CHIP_P5_AND_MINUS(bp) \
+ (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
+ struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_napi **bnapi;
@@ -1783,7 +2367,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -1811,18 +2395,42 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
+ u32 num_rss_ctx;
int nr_vnics;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
+ u32 rss_hash_delta;
+ u32 rss_cap;
+#define BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA BIT(0)
+#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
+#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
+#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 cos0_cos1_shared;
+ u8 num_tc;
+
+ u16 max_pfcwd_tmo_ms;
+
+ u8 tph_mode;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1840,6 +2448,12 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
+#define BNXT_STATE_FW_ACTIVATE 11
+#define BNXT_STATE_RECOVER 12
+#define BNXT_STATE_FW_NON_FATAL_COND 13
+#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1847,6 +2461,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
+ int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB
@@ -1859,30 +2474,76 @@ struct bnxt {
u32 msg_enable;
- u32 fw_cap;
- #define BNXT_FW_CAP_SHORT_CMD 0x00000001
- #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
- #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
- #define BNXT_FW_CAP_NEW_RM 0x00000008
- #define BNXT_FW_CAP_IF_CHANGE 0x00000010
- #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
- #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
- #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
- #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
- #define BNXT_FW_CAP_PKG_VER 0x00004000
- #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
- #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
- #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
- #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
- #define BNXT_FW_CAP_HOT_RESET 0x00200000
- #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
- #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
- #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
- #define BNXT_FW_CAP_PTP_PPS 0x10000000
- #define BNXT_FW_CAP_RING_MONITOR 0x40000000
+ u64 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD BIT_ULL(0)
+ #define BNXT_FW_CAP_LLDP_AGENT BIT_ULL(1)
+ #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
+ #define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
+ #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
+ #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
+ #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9)
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
+ #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
+ #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
+ #define BNXT_FW_CAP_PKG_VER BIT_ULL(14)
+ #define BNXT_FW_CAP_CFA_ADV_FLOW BIT_ULL(15)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
+ #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
+ #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
+ #define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS BIT_ULL(23)
+ #define BNXT_FW_CAP_VLAN_RX_STRIP BIT_ULL(24)
+ #define BNXT_FW_CAP_VLAN_TX_INSERT BIT_ULL(25)
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED BIT_ULL(26)
+ #define BNXT_FW_CAP_LIVEPATCH BIT_ULL(27)
+ #define BNXT_FW_CAP_PTP_PPS BIT_ULL(28)
+ #define BNXT_FW_CAP_HOT_RESET_IF BIT_ULL(29)
+ #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
+ #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31)
+ #define BNXT_FW_CAP_PTP BIT_ULL(32)
+ #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
+ #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
+ #define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
+ #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
+ #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
+ #define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
+
+ u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
+#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
+#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT)
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
+#define BNXT_RDMA_SRIOV_EN(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
+#define BNXT_ROCE_VF_RESC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
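The fw_cap field widens from u32 to u64 above because the capability flags now run past bit 31; redefining them with BIT_ULL() keeps the high bits well-defined. A minimal standalone sketch of the pitfall this avoids (names invented, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)	(1ULL << (n))
    #define FW_CAP_PTP	BIT_ULL(32)	/* a flag index past bit 31 */

    int main(void)
    {
    	uint64_t fw_cap = FW_CAP_PTP;

    	/* With a 32-bit field, "1 << 32" is undefined behavior and this
    	 * test could never fire; in a u64 it is an ordinary bit.
    	 */
    	printf("PTP capable: %d\n", (fw_cap & FW_CAP_PTP) != 0);
    	return 0;
    }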
@@ -1896,12 +2557,16 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
+ u16 pcie_stat_len;
u8 pri2cos_idx[8];
u8 pri2cos_valid;
+ struct bnxt_total_ring_err_stats ring_err_stats_prev;
+
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
- int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -1914,11 +2579,14 @@ struct bnxt {
#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff)
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
+ u16 vxlan_gpe_fw_dst_port_id;
__be16 vxlan_port;
__be16 nge_port;
+ __be16 vxlan_gpe_port;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -1951,7 +2619,9 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_RESTART_ULP_SP_EVENT 24
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -1961,6 +2631,7 @@ struct bnxt {
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+#define BNXT_FW_RESET_STATE_ABORT 7
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
@@ -1979,21 +2650,17 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_offset; /* db_offset within db_size */
int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
@@ -2001,18 +2668,29 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
+
+#define BNXT_L2_FLTR_MAX_FLTR 1024
+#define BNXT_L2_FLTR_HASH_SIZE 32
+#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE];
+
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
+ struct list_head usr_fltr_list;
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
- /* copied from flags in hwrm_port_phy_qcaps_output */
- u8 phy_flags;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
@@ -2021,6 +2699,15 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2033,10 +2720,13 @@ struct bnxt {
u16 dump_flag;
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
+#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
/* devlink interface and vf-rep structs */
struct devlink *dl;
@@ -2048,22 +2738,34 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
+#ifdef CONFIG_BNXT_HWMON
struct device *hwmon_dev;
+ u8 warn_thresh_temp;
+ u8 crit_thresh_temp;
+ u8 fatal_thresh_temp;
+ u8 shutdown_thresh_temp;
+#endif
+ u32 thermal_threshold_type;
+ enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
+ struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_MAX];
};
#define BNXT_NUM_RX_RING_STATS 8
#define BNXT_NUM_TX_RING_STATS 8
#define BNXT_NUM_TPA_RING_STATS 4
#define BNXT_NUM_TPA_RING_STATS_P5 5
-#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+#define BNXT_NUM_TPA_RING_STATS_P7 6
#define BNXT_RING_STATS_SIZE_P5 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
BNXT_NUM_TPA_RING_STATS_P5) * 8)
-#define BNXT_RING_STATS_SIZE_P5_SR2 \
+#define BNXT_RING_STATS_SIZE_P7 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
- BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+ BNXT_NUM_TPA_RING_STATS_P7) * 8)
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
@@ -2090,6 +2792,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_RX_STATS_EXT_NUM_LEGACY \
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
+
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
@@ -2107,13 +2812,14 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp,
+ const struct bnxt_tx_ring_info *txr)
{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
+ u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+ return bp->tx_ring_size - (used & bp->tx_ring_mask);
}
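The rewritten bnxt_tx_avail() drops the bare compiler barrier in favor of READ_ONCE() on each index and computes ring occupancy by unsigned subtraction, which stays correct after the producer index wraps. A standalone sketch of that arithmetic (values invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t ring_size = 512, mask = ring_size - 1;
    	uint32_t prod = 5, cons = 0xfffffff0u;	/* prod wrapped past cons */
    	uint32_t used = (prod - cons) & mask;	/* unsigned wrap gives 21 */

    	printf("used=%u avail=%u\n", used, ring_size - used);
    	return 0;
    }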
static inline void bnxt_writeq(struct bnxt *bp, u64 val,
@@ -2144,10 +2850,11 @@ static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq_relaxed(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel_relaxed(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2159,10 +2866,11 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp,
static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2170,54 +2878,114 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
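Both doorbell helpers now mask the caller's index through DB_RING_IDX() and take the 64-bit write on every P5-plus chip. A hedged caller sketch, assuming the driver's usual publish-then-kick ordering (the wrapper name here is hypothetical):

    static inline void bnxt_txr_db_kick_sketch(struct bnxt *bp,
    					       struct bnxt_tx_ring_info *txr,
    					       u16 prod)
    {
    	/* Make the BD writes visible to the device before the doorbell. */
    	wmb();
    	bnxt_db_write(bp, &txr->tx_db, prod);
    }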
+/* Must hold rtnl_lock */
+static inline bool bnxt_sriov_cfg(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg);
+#else
+ return false;
+#endif
+}
+
+extern const u16 bnxt_bstore_to_trace[];
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags);
+void bnxt_fill_ipv6_mask(__be32 mask[4]);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset);
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force);
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
-int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ u16 curr);
+void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
-bool bnxt_is_fw_healthy(struct bnxt *bp);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
-int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_reenable_sriov(struct bnxt *bp);
+void bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats);
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
+int bnxt_fw_init_one(struct bnxt *bp);
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx);
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb);
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx);
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
-
+void bnxt_print_device_info(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
new file mode 100644
index 000000000000..ccb8b509662d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -0,0 +1,677 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_coredump.h"
+
+static const u16 bnxt_bstore_to_seg_id[] = {
+ [BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
+ [BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
+ [BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
+ [BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
+ [BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
+ [BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
+ [BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
+ [BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
+ [BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
+ [BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
+ [BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
+ [BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
+ [BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
+ [BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
+ [BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
+ [BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
+ [BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
+ [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
+ [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
+ [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+ [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
+ [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
+};
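The mapping is sparse on purpose; a note on how the zero entries behave:

    /* Any BNXT_CTX_* type absent from the table above maps to seg_id 0,
     * and bnxt_get_ctx_coredump() below skips seg_id == 0, so only the
     * listed context types are emitted as coredump segments.
     */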
+
+static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
+ u32 *offset)
+{
+ struct hwrm_dbg_log_buffer_flush_output *resp;
+ struct hwrm_dbg_log_buffer_flush_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ req->type = cpu_to_le16(type);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *offset = le32_to_cpu(resp->current_buffer_offset);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ struct hwrm_dbg_cmn_output *cmn_resp;
+ u16 seq = 0, len, segs_off;
+ dma_addr_t dma_handle;
+ void *dma_buf, *resp;
+ int rc, off = 0;
+
+ dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
+ if (!dma_buf) {
+ hwrm_req_drop(bp, msg);
+ return -ENOMEM;
+ }
+
+ hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
+ cmn_resp = hwrm_req_hold(bp, msg);
+ resp = cmn_resp;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = hwrm_req_send(bp, msg);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
+ } else {
+ rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
+ break;
+ }
+ }
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ hwrm_req_drop(bp, msg);
+ return rc;
+}
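bnxt_hwrm_dbg_dma_data() drives every paged HWRM debug command through one loop: patch the sequence number into the request at the recorded offset, send, consume the DMA slice, and continue while the firmware sets HWRM_DBG_CMN_FLAGS_MORE. A standalone sketch of the offsetof() bookkeeping that makes the helper command-agnostic (struct layout invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_retrieve_input {	/* stand-in for a HWRM request */
    	uint16_t req_type;
    	uint16_t seq_no;
    };

    int main(void)
    {
    	struct fake_retrieve_input req = { .req_type = 1 };
    	size_t seq_off = offsetof(struct fake_retrieve_input, seq_no);
    	uint16_t *seq_ptr = (uint16_t *)((char *)&req + seq_off);

    	*seq_ptr = 42;	/* what "*seq_ptr = cpu_to_le16(seq)" does above */
    	printf("seq_no=%u\n", (unsigned int)req.seq_no);
    	return 0;
    }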
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ struct hwrm_dbg_coredump_list_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
+ if (rc)
+ return rc;
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
+ if (rc)
+ return rc;
+
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
+
+ return hwrm_req_send(bp, req);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 buf_len, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input *req;
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
+ if (rc)
+ return rc;
+
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf) {
+ info.dest_buf = buf + offset;
+ info.buf_len = buf_len;
+ info.seg_start = offset;
+ }
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance, u32 comp_id,
+ u32 seg_id)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
+ } else {
+ seg_hdr->component_id = cpu_to_le32(comp_id);
+ seg_hdr->segment_id = cpu_to_le32(seg_id);
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
+{
+ struct mm_struct *mm = current->mm;
+ int i, len, last = 0;
+
+ if (mm) {
+ len = min_t(int, mm->arg_end - mm->arg_start,
+ sizeof(record->commandline) - 1);
+ if (len && !copy_from_user(record->commandline,
+ (char __user *)mm->arg_start, len)) {
+ for (i = 0; i < len; i++) {
+ if (record->commandline[i])
+ last = i;
+ else
+ record->commandline[i] = ' ';
+ }
+ record->commandline[last + 1] = 0;
+ return;
+ }
+ }
+
+ strscpy(record->commandline, current->comm, TASK_COMM_LEN);
+}
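A note on the argv handling above:

    /* The argv words in user memory are NUL-separated ("ethtool\0-W\0..."),
     * so the loop rewrites each interior NUL as a space and re-terminates
     * after the last non-NUL byte; when no mm is available (e.g. a kernel
     * thread), the short task comm is recorded instead.
     */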
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strscpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ bnxt_fill_cmdline(record);
+ record->total_segments = cpu_to_le32(total_segs);
+
+ if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
+ netdev_warn(bp->dev, "Unknown OS release in coredump\n");
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static void bnxt_fill_drv_seg_record(struct bnxt *bp,
+ struct bnxt_driver_segment_record *record,
+ struct bnxt_ctx_mem_type *ctxm, u16 type)
+{
+ struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
+ u32 offset = 0;
+ int rc = 0;
+
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+
+ rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
+ if (rc)
+ return;
+
+ bnxt_bs_trace_check_wrap(bs_trace, offset);
+ record->offset = cpu_to_le32(bs_trace->last_offset);
+ record->wrapped = bs_trace->wrapped;
+}
+
+static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
+ u32 *segs)
+{
+ struct bnxt_driver_segment_record record = {};
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u32 comp_id = BNXT_DRV_COMP_ID;
+ void *data = NULL;
+ size_t len = 0;
+ u16 type;
+
+ *segs = 0;
+ if (!ctx)
+ return 0;
+
+ if (buf)
+ buf += offset;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ bool trace = bnxt_bs_trace_avail(bp, type);
+ u32 seg_id = bnxt_bstore_to_seg_id[type];
+ size_t seg_len, extra_hlen = 0;
+
+ if (!ctxm->mem_valid || !seg_id)
+ continue;
+
+ if (trace) {
+ extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ }
+ }
+
+ if (buf)
+ data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+
+ seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
+ 0, 0, 0, comp_id, seg_id);
+ memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
+ buf += BNXT_SEG_HDR_LEN;
+ if (trace)
+ memcpy(buf, &record, BNXT_SEG_RCD_LEN);
+ buf += seg_len;
+ }
+ len += BNXT_SEG_HDR_LEN + seg_len;
+ *segs += 1;
+ }
+ return len;
+}
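The layout each iteration above produces, for reference (sizes are the struct sizes from bnxt_coredump.h):

    /*
     *   +----------------------------+  <- buf
     *   | bnxt_coredump_segment_hdr  |  BNXT_SEG_HDR_LEN
     *   +----------------------------+
     *   | bnxt_driver_segment_record |  BNXT_SEG_RCD_LEN (trace segs only)
     *   +----------------------------+
     *   | context memory contents    |  bnxt_copy_ctx_mem() length
     *   +----------------------------+
     *
     * The buf == NULL pass through the same loop only accumulates "len",
     * which is how callers size the buffer before the fill pass.
     */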
+
+static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
+ u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
+ struct coredump_segment_record *seg_record = NULL;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ if (buf)
+ buf_len = *dump_len;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* The first segment is the hwrm_ver_get response.
+ * For the hwrm_ver_get response, component id = 2 and segment id = 0.
+ */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ if (dump_type == BNXT_DUMP_DRIVER) {
+ u32 drv_len, segs = 0;
+
+ drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
+ *dump_len += drv_len;
+ offset += drv_len;
+ if (buf)
+ coredump.total_segs += segs;
+ goto err;
+ }
+
+ seg_record_len = sizeof(*seg_record);
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ if (buf && ((offset + seg_hdr_len) >
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
+ rc = -ENOBUFS;
+ goto err;
+ }
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_record->segment_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf, buf_len,
+ offset + seg_hdr_len);
+ if (rc && rc == -ENOBUFS)
+ goto err;
+ else if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_record->segment_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0, 0, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ /* The actual coredump length can be smaller than the length
+ * the FW reported earlier. Use the ethtool-provided length.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
+ return rc;
+}
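Callers size first and fill second. A hedged sketch of that convention (the wrapper name is hypothetical and linux/vmalloc.h is assumed), matching what the devlink fw reporter does further down:

    static int bnxt_coredump_example(struct bnxt *bp, void **buf, u32 *len)
    {
    	*len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);	/* size pass */
    	if (!*len)
    		return -EIO;

    	*buf = vmalloc(*len);
    	if (!*buf)
    		return -ENOMEM;

    	return bnxt_get_coredump(bp, BNXT_DUMP_LIVE, *buf, len);	/* fill pass */
    }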
+
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+ u32 dump_len)
+{
+ u32 data_copied = 0;
+ u32 data_len;
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ data_len = rmem->page_size;
+ if (data_copied + data_len > dump_len)
+ data_len = dump_len - data_copied;
+ memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+ data_copied += data_len;
+ if (data_copied >= dump_len)
+ break;
+ }
+ return data_copied;
+}
+
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+ struct bnxt_ring_mem_info *rmem;
+ u32 offset = 0;
+
+ if (!bp->fw_crash_mem)
+ return -ENOENT;
+
+ rmem = &bp->fw_crash_mem->ring_mem;
+
+ if (rmem->depth > 1) {
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+ offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
+ buf + offset,
+ dump_len - offset);
+ if (offset >= dump_len)
+ break;
+ }
+ } else {
+ bnxt_copy_crash_data(rmem, buf, dump_len);
+ }
+
+ return 0;
+}
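How the two branches above relate:

    /* With rmem->depth > 1 the crash-dump backing store is two-level:
     * each fw_crash_mem->ctx_pg_tbl[i] is itself a page table whose
     * ring_mem describes the data pages, so the copy calls
     * bnxt_copy_crash_data() once per table. With depth == 1,
     * rmem->pg_arr[] holds the data pages directly.
     */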
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+ u32 sig = 0;
+
+ /* The first 4 bytes (signature) of a crash dump are always non-zero */
+ bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+ return !!sig;
+}
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
+{
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+ return bnxt_copy_crash_dump(bp, buf, *dump_len);
+#ifdef CONFIG_TEE_BNXT_FW
+ else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
+#endif
+ else
+ return -EOPNOTSUPP;
+ } else {
+ return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
+ }
+}
+
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+{
+ struct hwrm_dbg_qcfg_output *resp;
+ struct hwrm_dbg_qcfg_input *req;
+ int rc, hdr_len = 0;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return -EOPNOTSUPP;
+
+ if (dump_type == BNXT_DUMP_CRASH &&
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto get_dump_len_exit;
+
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
+ } else {
+ /* The driver adds the coredump header and the "HWRM_VER_GET
+ * response" segment on top of the FW-reported coredump size.
+ */
+ hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
+ sizeof(struct hwrm_ver_get_output) +
+ sizeof(struct bnxt_coredump_record);
+ *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
+ }
+ if (*dump_len <= hdr_len)
+ rc = -EINVAL;
+
+get_dump_len_exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
+{
+ u32 len = 0;
+
+ if (dump_type == BNXT_DUMP_CRASH &&
+ bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+ bp->fw_crash_mem) {
+ if (!bnxt_crash_dump_avail(bp))
+ return 0;
+
+ return bp->fw_crash_len;
+ }
+
+ if (dump_type != BNXT_DUMP_DRIVER) {
+ if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
+ return len;
+ }
+ if (dump_type != BNXT_DUMP_CRASH)
+ __bnxt_get_coredump(bp, dump_type, NULL, &len);
+
+ return len;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index 09c22f8fe399..c087df88154a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -10,6 +10,10 @@
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
@@ -63,4 +67,104 @@ struct bnxt_coredump_record {
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
+
+struct bnxt_driver_segment_record {
+ __le32 max_entries;
+ __le32 entry_size;
+ __le32 offset;
+ __u8 wrapped:1;
+ __u8 unused[3];
+};
+
+#define BNXT_VER_GET_COMP_ID 2
+#define BNXT_DRV_COMP_ID 0xd
+
+#define BNXT_CTX_MEM_SEG_ID_START 0x200
+
+#define BNXT_CTX_MEM_SEG_QP (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
+#define BNXT_CTX_MEM_SEG_SRQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
+#define BNXT_CTX_MEM_SEG_CQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
+#define BNXT_CTX_MEM_SEG_VNIC (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
+#define BNXT_CTX_MEM_SEG_STAT (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
+#define BNXT_CTX_MEM_SEG_STQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
+#define BNXT_CTX_MEM_SEG_FTQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
+#define BNXT_CTX_MEM_SEG_MRAV (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
+#define BNXT_CTX_MEM_SEG_TIM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
+
+#define BNXT_CTX_MEM_SEG_SRT 0x1
+#define BNXT_CTX_MEM_SEG_SRT2 0x2
+#define BNXT_CTX_MEM_SEG_CRT 0x3
+#define BNXT_CTX_MEM_SEG_CRT2 0x4
+#define BNXT_CTX_MEM_SEG_RIGP0 0x5
+#define BNXT_CTX_MEM_SEG_L2HWRM 0x6
+#define BNXT_CTX_MEM_SEG_REHWRM 0x7
+#define BNXT_CTX_MEM_SEG_CA0 0x8
+#define BNXT_CTX_MEM_SEG_CA1 0x9
+#define BNXT_CTX_MEM_SEG_CA2 0xa
+#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+#define BNXT_CTX_MEM_SEG_QPC 0xc
+#define BNXT_CTX_MEM_SEG_KONG 0xd
+
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+#define BNXT_SEG_HDR_LEN sizeof(struct bnxt_coredump_segment_hdr)
+#define BNXT_SEG_RCD_LEN sizeof(struct bnxt_driver_segment_record)
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+ u32 seg_start;
+ u32 buf_len;
+};
+
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
+void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec,
+ u32 seg_len, int status, u32 duration,
+ u32 instance, u32 comp_id, u32 seg_id);
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 228a5db7e143..a00b67334f9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"
@@ -98,7 +98,6 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
@@ -129,11 +128,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
- data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
- memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (qidx == 0) {
req->queue_id0 = cos2bw.queue_id;
- req->unused_0 = 0;
+ req->queue_id0_min_bw = cos2bw.min_bw;
+ req->queue_id0_max_bw = cos2bw.max_bw;
+ req->queue_id0_tsa_assign = cos2bw.tsa;
+ req->queue_id0_pri_lvl = cos2bw.pri_lvl;
+ req->queue_id0_bw_weight = cos2bw.bw_weight;
+ } else {
+ memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg));
}
}
return hwrm_req_send(bp, req);
@@ -144,7 +147,6 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
struct hwrm_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
@@ -158,13 +160,19 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
return rc;
}
- data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < bp->max_tc; i++) {
int tc;
- memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
- if (i == 0)
+ if (i == 0) {
cos2bw.queue_id = resp->queue_id0;
+ cos2bw.min_bw = resp->queue_id0_min_bw;
+ cos2bw.max_bw = resp->queue_id0_max_bw;
+ cos2bw.tsa = resp->queue_id0_tsa_assign;
+ cos2bw.pri_lvl = resp->queue_id0_pri_lvl;
+ cos2bw.bw_weight = resp->queue_id0_bw_weight;
+ } else {
+ memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg));
+ }
tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
if (tc < 0)
@@ -220,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
@@ -479,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
+ }
+ for (i = 0; i < max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;
@@ -627,7 +637,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 6eed231de565..5b2a6f678244 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -23,13 +23,16 @@ struct bnxt_dcb {
struct bnxt_cos2bw_cfg {
u8 pad[3];
- u8 queue_id;
- __le32 min_bw;
- __le32 max_bw;
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
+/* for min_bw / max_bw */
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- u8 tsa;
- u8 pri_lvl;
- u8 bw_weight;
u8 unused;
};
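struct_group_attr() gives the five scheduling members a named window, cfg, so the memcpy() calls in bnxt_dcb.c copy exactly that span (with fortify-source bounds checking) instead of relying on the old sizeof(cos2bw) - 4 pointer arithmetic. A standalone sketch of the idea (macro simplified from include/linux/stddef.h, GNU C, demo types invented):

    #include <stdio.h>
    #include <string.h>

    #define struct_group(NAME, MEMBERS...)		\
    	union {					\
    		struct { MEMBERS };		\
    		struct { MEMBERS } NAME;	\
    	}

    struct cos2bw_demo {
    	char pad[3];
    	struct_group(cfg,
    		unsigned char queue_id;
    		unsigned int min_bw;
    	);
    };

    int main(void)
    {
    	struct cos2bw_demo src, dst;

    	memset(&src, 0, sizeof(src));
    	memset(&dst, 0, sizeof(dst));
    	src.queue_id = 4;
    	src.min_bw = 100;
    	/* Copies exactly the grouped window, nothing before or after. */
    	memcpy(&dst.cfg, &src.cfg, sizeof(dst.cfg));
    	printf("queue_id=%u min_bw=%u\n", dst.queue_id, dst.min_bw);
    	return 0;
    }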
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..3324afbb3bec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -10,7 +10,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include <linux/dim.h>
#include "bnxt.h"
#include "bnxt_debugfs.h"
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
index d0bb4887acd0..a0a8d687dd99 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -7,7 +7,7 @@
* the Free Software Foundation.
*/
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 951c0c00cc95..15de802bbac4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,13 +9,28 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
-#include "bnxt_hsi.h"
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ptp.h"
+#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
+
+static void __bnxt_fw_recover(struct bnxt *bp)
+{
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_fw_reset(bp);
+ else
+ bnxt_fw_exception(bp);
+}
static int
bnxt_dl_flash_update(struct devlink *dl,
@@ -25,14 +40,8 @@ bnxt_dl_flash_update(struct devlink *dl,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (!BNXT_PF(bp)) {
- NL_SET_ERR_MSG_MOD(extack,
- "flash update not supported from a VF");
- return -EPERM;
- }
-
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
@@ -40,245 +49,597 @@ bnxt_dl_flash_update(struct devlink *dl,
return rc;
}
-static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
{
- struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val;
+ struct hwrm_func_cfg_input *req;
int rc;
- if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
+ if (remote_reset)
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);
+
+ return hwrm_req_send(bp, req);
+}
+
+static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
+{
+ switch (severity) {
+ case SEVERITY_NORMAL: return "normal";
+ case SEVERITY_WARNING: return "warning";
+ case SEVERITY_RECOVERABLE: return "recoverable";
+ case SEVERITY_FATAL: return "fatal";
+ default: return "unknown";
+ }
+}
+
+static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
+{
+ switch (remedy) {
+ case REMEDY_DEVLINK_RECOVER: return "devlink recover";
+ case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
+ case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
+ case REMEDY_FW_UPDATE: return "update firmware";
+ case REMEDY_HW_REPLACE: return "replace hardware";
+ default: return "unknown";
+ }
+}
+
+static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_health *h = bp->fw_health;
+ u32 fw_status, fw_resets;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
return 0;
+ }
- val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!h->status_reliable) {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
+ return 0;
+ }
- if (BNXT_FW_IS_BOOTING(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Not yet completed initialization");
- if (rc)
- return rc;
- } else if (BNXT_FW_IS_ERR(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Encountered fatal error and cannot recover");
- if (rc)
- return rc;
+ mutex_lock(&h->lock);
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (BNXT_FW_IS_BOOTING(fw_status)) {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
+ } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
+ if (!h->severity) {
+ h->severity = SEVERITY_FATAL;
+ h->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ h->diagnoses++;
+ devlink_health_report(h->fw_reporter,
+ "FW error diagnosed", h);
+ }
+ devlink_fmsg_string_pair_put(fmsg, "Status", "error");
+ devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
+ } else {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
}
- if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
- if (rc)
- return rc;
+ devlink_fmsg_string_pair_put(fmsg, "Severity",
+ bnxt_health_severity_str(h->severity));
+
+ if (h->severity) {
+ devlink_fmsg_string_pair_put(fmsg, "Remedy",
+ bnxt_health_remedy_str(h->remedy));
+ if (h->remedy == REMEDY_DEVLINK_RECOVER)
+ devlink_fmsg_string_pair_put(fmsg, "Impact",
+ "traffic+ntuple_cfg");
}
- val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
- if (rc)
- return rc;
+ mutex_unlock(&h->lock);
+ if (!h->resets_reliable)
+ return 0;
+ fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
+ devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
+ devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
+ devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
+ devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
+ devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
return 0;
}
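For reference, a run of "devlink health diagnose <dev> reporter fw" against the callback above renders key/value pairs along these lines (values invented):

    /*
     *   Status: error        Syndrome: 0x8000
     *   Severity: fatal      Remedy: device power cycle
     *   Resets: 2  Arrests: 0  Survivals: 1  Discoveries: 3
     *   Fatalities: 1  Diagnoses: 1
     */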
-static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
- .name = "fw",
- .diagnose = bnxt_fw_reporter_diagnose,
-};
-
-static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ u32 dump_len;
+ void *data;
+ int rc;
- if (!priv_ctx)
+ /* TODO: no firmware dump support in devlink_health_report() context */
+ if (priv_ctx)
return -EOPNOTSUPP;
- bnxt_fw_reset(bp);
- return -EINPROGRESS;
-}
+ dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
+ if (!dump_len)
+ return -EIO;
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
- .name = "fw_reset",
- .recover = bnxt_fw_reset_recover,
-};
+ data = vmalloc(dump_len);
+ if (!data)
+ return -ENOMEM;
-static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+ rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
+ if (!rc) {
+ devlink_fmsg_pair_nest_start(fmsg, "core");
+ devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
+ devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
+ devlink_fmsg_pair_nest_end(fmsg);
+ }
+
+ vfree(data);
+ return rc;
+}
+
+static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- unsigned long event;
- if (!priv_ctx)
- return -EOPNOTSUPP;
+ if (bp->fw_health->severity == SEVERITY_FATAL)
+ return -ENODEV;
- bp->fw_health->fatal = true;
- event = fw_reporter_ctx->sp_event;
- if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
- bnxt_fw_reset(bp);
- else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
- bnxt_fw_exception(bp);
+ set_bit(BNXT_STATE_RECOVER, &bp->state);
+ __bnxt_fw_recover(bp);
return -EINPROGRESS;
}
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
- .name = "fw_fatal",
- .recover = bnxt_fw_fatal_recover,
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_diagnose,
+ .dump = bnxt_fw_dump,
+ .recover = bnxt_fw_recover,
};
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_create(bp->dl, ops, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
+ }
+
+ return reporter;
+}
+
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
- goto err_recovery;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- health->fw_reset_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reset_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reset_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reset_reporter));
- health->fw_reset_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
+}
+
+void bnxt_devlink_health_fw_report(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int rc;
-err_recovery:
- if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ if (!fw_health)
return;
- if (!health->fw_reporter) {
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- return;
- }
+ if (!fw_health->fw_reporter) {
+ __bnxt_fw_recover(bp);
+ return;
}
- if (health->fw_fatal_reporter)
- return;
+ mutex_lock(&fw_health->lock);
+ fw_health->severity = SEVERITY_RECOVERABLE;
+ fw_health->remedy = REMEDY_DEVLINK_RECOVER;
+ mutex_unlock(&fw_health->lock);
+ rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
+ fw_health);
+ if (rc == -ECANCELED)
+ __bnxt_fw_recover(bp);
+}
+
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u8 state;
- health->fw_fatal_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_fatal_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_fatal_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
- health->fw_fatal_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ mutex_lock(&fw_health->lock);
+ if (healthy) {
+ fw_health->severity = SEVERITY_NORMAL;
+ state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
+ } else {
+ fw_health->severity = SEVERITY_FATAL;
+ fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
}
+ mutex_unlock(&fw_health->lock);
+ devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_dl *dl = devlink_priv(bp->dl);
- if (!health)
- return;
+ devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
+ bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
+}
- if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
- health->fw_reset_reporter) {
- devlink_health_reporter_destroy(health->fw_reset_reporter);
- health->fw_reset_reporter = NULL;
+static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
+static void
+bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
+ struct hwrm_fw_livepatch_output *resp)
+{
+ int err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (err) {
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
+ netdev_err(bp->dev, "Illegal live patch opcode");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Live patch deactivation failed. Firmware not patched.");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
+ NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
+ break;
+ default:
+ netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
+ break;
}
+}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
+static int
+bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ struct hwrm_fw_livepatch_query_output *query_resp;
+ struct hwrm_fw_livepatch_query_input *query_req;
+ struct hwrm_fw_livepatch_output *patch_resp;
+ struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
+ u32 installed = 0;
+ u8 target;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
+ return -EOPNOTSUPP;
+ }
+
+ rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+ query_resp = hwrm_req_hold(bp, query_req);
+
+ rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
+ if (rc) {
+ hwrm_req_drop(bp, query_req);
+ return rc;
+ }
+ patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
+ patch_resp = hwrm_req_hold(bp, patch_req);
+
+ for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
+ query_req->fw_target = target;
+ rc = hwrm_req_send(bp, query_req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
+ break;
+ }
+
+ flags = le16_to_cpu(query_resp->status_flags);
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
+ continue;
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
+ continue;
+ }
+
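+ /* Patches installed but idle are activated; patches removed
+ * from NVM but still running are deactivated.
+ */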
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
+
+ patch_req->fw_target = target;
+ rc = hwrm_req_send(bp, patch_req);
+ if (rc) {
+ bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
+ break;
+ }
+ installed++;
}
- if (health->fw_fatal_reporter) {
- devlink_health_reporter_destroy(health->fw_fatal_reporter);
- health->fw_fatal_reporter = NULL;
+ if (!rc && !installed) {
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
+ hwrm_req_drop(bp, query_req);
+ hwrm_req_drop(bp, patch_req);
+ return rc;
}
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *fw_health = bp->fw_health;
- struct bnxt_fw_reporter_ctx fw_reporter_ctx;
-
- fw_reporter_ctx.sp_event = event;
- switch (event) {
- case BNXT_FW_RESET_NOTIFY_SP_EVENT:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal async event received",
- &fw_reporter_ctx);
- return;
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
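+ /* Stop ULP (e.g. RoCE) activity before tearing down the device */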
+ bnxt_ulp_stop(bp);
+ rtnl_lock();
+ netdev_lock(bp->dev);
+ if (bnxt_sriov_cfg(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
+ return -EOPNOTSUPP;
+ }
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
+ return -ENODEV;
+ }
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, true);
+ bnxt_vf_reps_free(bp);
+ rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
+ if (netif_running(bp->dev))
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ bnxt_clear_reservations(bp, false);
+ bnxt_free_ctx_mem(bp, false);
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
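+ /* Under the no_reset limit, activation is performed by applying
+ * NVM live patches instead of resetting the firmware.
+ */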
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return bnxt_dl_livepatch_activate(bp, extack);
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
+ NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
+ return -EOPNOTSUPP;
+ }
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+ rtnl_lock();
+ netdev_lock(bp->dev);
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ return -ENODEV;
}
- if (!fw_health->fw_reset_reporter)
- return;
+ if (netif_running(bp->dev))
+ set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rc = bnxt_hwrm_firmware_reset(bp->dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
+ FW_RESET_REQ_FLAGS_FW_ACTIVATION);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ }
- devlink_health_report(fw_health->fw_reset_reporter,
- "FW non-fatal reset event received",
- &fw_reporter_ctx);
- return;
+ return rc;
+}
- case BNXT_FW_EXCEPTION_SP_EVENT:
- if (!fw_health->fw_fatal_reporter)
- return;
+static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal error reported",
- &fw_reporter_ctx);
- return;
+ netdev_assert_locked(bp->dev);
+
+ *actions_performed = 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_fw_init_one(bp);
+ bnxt_vf_reps_alloc(bp);
+ if (netif_running(bp->dev))
+ rc = bnxt_open_nic(bp, true, true);
+ if (!rc) {
+ bnxt_reenable_sriov(bp);
+ bnxt_ptp_reapply_pps(bp);
+ }
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ unsigned long start = jiffies;
+ unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;
+
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
+ if (!netif_running(bp->dev))
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
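+ /* Poll until the reset sequence clears BNXT_STATE_FW_ACTIVATE */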
+ while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
+ if (time_after(jiffies, timeout)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
+ rc = -ETIMEDOUT;
+ break;
+ }
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
+ rc = -ENODEV;
+ break;
+ }
+ msleep(50);
+ }
+ rtnl_lock();
+ netdev_lock(bp->dev);
+ if (!rc)
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
}
+
+ if (!rc) {
+ bnxt_print_device_info(bp);
+ if (netif_running(bp->dev)) {
+ mutex_lock(&bp->link_lock);
+ bnxt_report_link(bp);
+ mutex_unlock(&bp->link_lock);
+ }
+ *actions_performed |= BIT(action);
+ } else if (netif_running(bp->dev)) {
+ netif_close(bp->dev);
+ }
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ bnxt_ulp_start(bp, rc);
+ return rc;
}
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
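+/* Flash selftest: read the VPD directory entry from NVM and write it back
+ * unchanged, exercising both the read and write paths.
+ */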
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *health = bp->fw_health;
- u8 state;
+ bool rc = false;
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
- if (healthy)
- state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
- else
- state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
- if (health->fatal)
- devlink_health_reporter_state_update(health->fw_fatal_reporter,
- state);
- else
- devlink_health_reporter_state_update(health->fw_reset_reporter,
- state);
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto done;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto done;
+ }
+
+ rc = true;
- health->fatal = false;
+done:
+ kfree(buf);
+ return rc;
}
-void bnxt_dl_health_recovery_done(struct bnxt *bp)
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
{
- struct bnxt_fw_health *hlth = bp->fw_health;
-
- if (hlth->fatal)
- devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
- else
- devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}
-static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
- struct netlink_ext_ack *extack);
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
@@ -287,6 +648,13 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
.info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
+ .reload_down = bnxt_dl_reload_down,
+ .reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -305,6 +673,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -374,7 +744,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
}
/* earlier devices present as an array of raw bytes */
- if (!BNXT_CHIP_P5(bp)) {
+ if (!BNXT_CHIP_P5_PLUS(bp)) {
dim = 0;
i = 0;
bits *= 3; /* array of 3 version components */
@@ -394,7 +764,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
goto exit;
bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
*nvm_cfg_ver <<= 8;
*nvm_cfg_ver |= ver.vu8;
} else {
@@ -414,7 +784,7 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
if (!strlen(buf))
return 0;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
return 0;
@@ -430,6 +800,57 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
return 0;
}
+#define BNXT_FW_SRT_PATCH "fw.srt.patch"
+#define BNXT_FW_CRT_PATCH "fw.crt.patch"
+
+static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
+ struct devlink_info_req *req,
+ const char *key)
+{
+ struct hwrm_fw_livepatch_query_input *query;
+ struct hwrm_fw_livepatch_query_output *resp;
+ u16 flags;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
+ return 0;
+
+ rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+
+ if (!strcmp(key, BNXT_FW_SRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
+ else if (!strcmp(key, BNXT_FW_CRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
+ else
+ goto exit;
+
+ resp = hwrm_req_hold(bp, query);
+ rc = hwrm_req_send(bp, query);
+ if (rc)
+ goto exit;
+
+ flags = le16_to_cpu(resp->status_flags);
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
+ resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
+ rc = devlink_info_version_running_put(req, key, resp->active_ver);
+ if (rc)
+ goto exit;
+ }
+
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
+ resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
+ rc = devlink_info_version_stored_put(req, key, resp->install_ver);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ hwrm_req_drop(bp, query);
+ return rc;
+}
+
#define HWRM_FW_VER_STR_LEN 16
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
@@ -445,10 +866,6 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
u32 ver = 0;
int rc;
- rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
- if (rc)
- return rc;
-
if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
@@ -554,8 +971,13 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
if (rc ||
- !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
+ if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
return 0;
+ }
buf[0] = 0;
strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
@@ -583,41 +1005,33 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
- return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ }
+ return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
-
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
+ int idx = 0, rc;
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -628,23 +1042,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -657,8 +1071,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
@@ -676,7 +1105,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@@ -692,6 +1122,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -712,6 +1168,34 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
return 0;
}
+static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
+ return 0;
+}
+
+static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
+
+ rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
+ if (rc)
+ return rc;
+
+ bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
+ return rc;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
@@ -729,22 +1213,34 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
+ /* keep REMOTE_DEV_RESET last; it is excluded based on caps */
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ bnxt_remote_dev_reset_get,
+ bnxt_remote_dev_reset_set, NULL),
};
static int bnxt_dl_params_register(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
- rc = devlink_params_register(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
@@ -753,11 +1249,15 @@ static int bnxt_dl_params_register(struct bnxt *bp)
static void bnxt_dl_params_unregister(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
+
if (bp->hwrm_spec_code < 0x10600)
return;
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}
int bnxt_dl_register(struct bnxt *bp)
@@ -782,6 +1282,7 @@ int bnxt_dl_register(struct bnxt *bp)
bp->dl = dl;
bp_dl = devlink_priv(dl);
bp_dl->bp = bp;
+ bnxt_dl_set_remote_reset(dl, true);
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 406dc655a5fc..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -13,6 +13,7 @@
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
+ bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
@@ -20,11 +21,30 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
+static inline void bnxt_dl_remote_reload(struct bnxt *bp)
+{
+ devlink_remote_reload_actions_performed(bp->dl, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+}
+
+static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
+}
+
+static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
+{
+ ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
+}
+
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
@@ -53,11 +73,11 @@ enum bnxt_dl_version_type {
BNXT_VERSION_STORED,
};
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
-void bnxt_dl_health_recovery_done(struct bnxt *bp);
+void bnxt_devlink_health_fw_report(struct bnxt *bp);
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
index 6f6576dc417a..53a3bcb0efe0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -8,7 +8,7 @@
*/
#include <linux/dim.h>
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
void bnxt_dim_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fbb56b1f70fd..068e191ede19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -8,9 +8,11 @@
* the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -22,18 +24,25 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
-#include "bnxt_hsi.h"
+#include <net/netdev_queues.h>
+#include <net/netlink.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
-#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
-#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+
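+/* Report NVM errors via netlink extack (when available) and the kernel log */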
+#define BNXT_NVM_ERR_MSG(dev, extack, msg) \
+ do { \
+ if (extack) \
+ NL_SET_ERR_MSG_MOD(extack, msg); \
+ netdev_err(dev, "%s\n", msg); \
+ } while (0)
static u32 bnxt_get_msglevel(struct net_device *dev)
{
@@ -68,6 +77,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_rx = true;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -75,6 +87,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_tx = true;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -101,12 +116,22 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !(bp->coal_cap.cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
+ return -EOPNOTSUPP;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_rx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -114,6 +139,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_tx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -134,11 +164,10 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
- rc = bnxt_close_nic(bp, true, false);
- if (!rc)
- rc = bnxt_open_nic(bp, true, false);
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
} else {
rc = bnxt_hwrm_set_coal(bp);
}
@@ -311,13 +340,16 @@ enum {
RX_NETPOLL_DISCARDS,
};
-static struct {
- u64 counter;
- char string[ETH_GSTRING_LEN];
-} bnxt_sw_func_stats[] = {
- {0, "rx_total_discard_pkts"},
- {0, "tx_total_discard_pkts"},
- {0, "rx_total_netpoll_discards"},
+static const char *const bnxt_ring_err_stats_arr[] = {
+ "rx_total_l4_csum_errors",
+ "rx_total_resets",
+ "rx_total_buf_errors",
+ "rx_total_oom_discards",
+ "rx_total_netpoll_discards",
+ "rx_total_ring_discards",
+ "tx_total_resets",
+ "tx_total_ring_discards",
+ "total_missed_irqs",
};
#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
@@ -427,6 +459,9 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};
static const struct {
@@ -465,7 +500,7 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
+#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
@@ -477,9 +512,9 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return BNXT_NUM_TPA_RING_STATS_P5;
- return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ return BNXT_NUM_TPA_RING_STATS_P7;
}
return BNXT_NUM_TPA_RING_STATS;
}
@@ -494,22 +529,28 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp)
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ return rx * bp->rx_nr_rings +
+ tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = bnxt_get_num_ring_stats(bp);
+ int len;
- num_stats += BNXT_NUM_SW_FUNC_STATS;
+ num_stats += BNXT_NUM_RING_ERR_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- num_stats += bp->fw_rx_stats_ext_size +
- bp->fw_tx_stats_ext_size;
+ len = min_t(int, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ num_stats += len;
+ len = min_t(int, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ num_stats += len;
if (bp->pri2cos_valid)
num_stats += BNXT_NUM_STATS_PRI;
}
@@ -553,18 +594,17 @@ static bool is_tx_ring(struct bnxt *bp, int ring_num)
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
- u32 i, j = 0;
+ struct bnxt_total_ring_err_stats ring_err_stats = {0};
struct bnxt *bp = netdev_priv(dev);
+ u64 *curr, *prev;
u32 tpa_stats;
+ u32 i, j = 0;
if (!bp->bnapi) {
- j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp);
goto skip_ring_stats;
}
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
- bnxt_sw_func_stats[i].counter = 0;
-
tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -592,28 +632,25 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
- sw = (u64 *)&cpr->sw_stats.rx;
+ sw = (u64 *)&cpr->sw_stats->rx;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
buf[j] = sw[k];
}
- sw = (u64 *)&cpr->sw_stats.cmn;
+ sw = (u64 *)&cpr->sw_stats->cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
-
- bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
- bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
- bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
- cpr->sw_stats.rx.rx_netpoll_discards;
}
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
- buf[j] = bnxt_sw_func_stats[i].counter;
+ bnxt_get_ring_err_stats(bp, &ring_err_stats);
skip_ring_stats:
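+ /* Fold in counters carried over from before earlier ring resets */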
+ curr = &ring_err_stats.rx_total_l4_csum_errors;
+ prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
+ buf[j] = *curr + *prev;
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
u64 *port_stats = bp->port_stats.sw_stats;
@@ -623,12 +660,17 @@ skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
+ u32 len;
- for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
+ len = min_t(u32, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ for (i = 0; i < len; i++, j++) {
buf[j] = *(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset);
}
- for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
+ len = min_t(u32, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ for (i = 0; i < len; i++, j++) {
buf[j] = *(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset);
}
@@ -646,16 +688,22 @@ skip_ring_stats:
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
}
}
@@ -664,106 +712,105 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- static const char * const *str;
u32 i, j, num_str;
+ const char *str;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_rx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
+ str = bnxt_ring_rx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
- if (is_tx_ring(bp, i)) {
- num_str = NUM_RING_TX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_tx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_tx_ring(bp, i))
+ for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
+ str = bnxt_ring_tx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
num_str = bnxt_get_num_tpa_ring_stats(bp);
if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
if (bp->max_tpa_v2)
- str = bnxt_ring_tpa2_stats_str;
+ for (j = 0; j < num_str; j++) {
+ str = bnxt_ring_tpa2_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
else
- str = bnxt_ring_tpa_stats_str;
-
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i, str[j]);
- buf += ETH_GSTRING_LEN;
- }
-skip_tpa_stats:
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_SW_STATS;
for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_rx_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_ring_tpa_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
+skip_tpa_stats:
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
+ str = bnxt_rx_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
+ for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
+ str = bnxt_cmn_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i, str);
}
- num_str = NUM_RING_CMN_SW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_cmn_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
- strcpy(buf, bnxt_sw_func_stats[i].string);
- buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
+ ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);
- if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
- strcpy(buf, bnxt_port_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_arr[i].string;
+ ethtool_puts(&buf, str);
}
- }
+
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
- strcpy(buf, bnxt_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ u32 len;
+
+ len = min_t(u32, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ for (i = 0; i < len; i++) {
+ str = bnxt_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
- for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
- strcpy(buf,
- bnxt_tx_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+
+ len = min_t(u32, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ for (i = 0; i < len; i++) {
+ str = bnxt_tx_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
- memcpy(buf, bp->test_info->string,
- bp->num_tests * ETH_GSTRING_LEN);
+ for (i = 0; i < bp->num_tests; i++)
+ ethtool_puts(&buf, bp->test_info->string[i]);
break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
@@ -773,37 +820,64 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
} else {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
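+ /* tcp-data-split cannot be forced off (only "auto" clears it) and
+ * cannot be enabled while an XDP program holds the rings in page mode.
+ */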
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -831,7 +905,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -871,6 +945,7 @@ static int bnxt_set_channels(struct net_device *dev,
bool sh = false;
int tx_xdp = 0;
int rc = 0;
+ int tx_cp;
if (channel->other_count)
return -EINVAL;
@@ -890,7 +965,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -901,11 +976,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -914,18 +984,19 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
* before PF unload
*/
}
- rc = bnxt_close_nic(bp, true, false);
- if (rc) {
- netdev_err(bp->dev, "Set channel failure rc :%x\n",
- rc);
- return rc;
- }
+ bnxt_close_nic(bp, true, false);
}
if (sh) {
@@ -942,8 +1013,9 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
@@ -961,29 +1033,66 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
-static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
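+/* Gather the IDs of active filters from a hash table under RCU, filling
+ * @ids from @start and stopping once @id_cnt entries are used.
+ */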
+static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
+ int tbl_size, u32 *ids, u32 start,
+ u32 id_cnt)
{
- int i, j = 0;
+ int i, j = start;
- cmd->data = bp->ntp_fltr_count;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ if (j >= id_cnt)
+ return j;
+ for (i = 0; i < tbl_size; i++) {
struct hlist_head *head;
- struct bnxt_ntuple_filter *fltr;
+ struct bnxt_filter_base *fltr;
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
+ head = &tbl[i];
hlist_for_each_entry_rcu(fltr, head, hash) {
- if (j == cmd->rule_cnt)
- break;
- rule_locs[j++] = fltr->sw_id;
+ if (!fltr->flags ||
+ test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
+ continue;
+ ids[j++] = fltr->sw_id;
+ if (j == id_cnt)
+ return j;
}
- rcu_read_unlock();
- if (j == cmd->rule_cnt)
- break;
}
- cmd->rule_cnt = j;
+ return j;
+}
+
+static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
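+/* Find a filter by software ID in a hash table; caller holds rcu_read_lock() */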
+ struct hlist_head tbl[],
+ int tbl_size, u32 id)
+{
+ int i;
+
+ for (i = 0; i < tbl_size; i++) {
+ struct hlist_head *head;
+ struct bnxt_filter_base *fltr;
+
+ head = &tbl[i];
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->flags && fltr->sw_id == id)
+ return fltr;
+ }
+ }
+ return NULL;
+}
+
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ u32 count;
+
+ cmd->data = bp->ntp_fltr_count;
+ rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
+ cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ rule_locs, count,
+ cmd->rule_cnt);
+ rcu_read_unlock();
+
return 0;
}
@@ -991,73 +1100,129 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fs =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
- int i, rc = -EINVAL;
+ int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
- struct hlist_head *head;
-
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (fltr->sw_id == fs->location)
- goto fltr_found;
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
}
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
rcu_read_unlock();
+ return 0;
}
- return rc;
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
+ rcu_read_unlock();
+ return rc;
+ }
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
-fltr_found:
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip4_spec.proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
+ }
fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
-
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
+ }
} else {
- int i;
-
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip6_spec.l4_proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
+ }
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
fkeys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
- fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
- fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->ring_cookie = fltr->rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
+ fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
@@ -1065,7 +1230,354 @@ fltr_err:
return rc;
}
-#endif
+
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+ u32 index)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+ if (!ctx)
+ return NULL;
+ return ethtool_rxfh_context_priv(ctx);
+}
+
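+/* The RSS indirection table and hash key share one coherent DMA buffer;
+ * the key is placed immediately after the cache-aligned table.
+ */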
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ return -ENOMEM;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ return 0;
+}
+
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
+
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
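+ /* A VF encoded in the ring cookie targets that function; the cookie
+ * value is one-based while the stored index is zero-based.
+ */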
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
+
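+/* User-defined IP flows accept only a wildcard protocol or fully-masked
+ * ICMP; TOS/tclass and L4 header bytes cannot be matched.
+ */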
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
+{
+ u8 mproto = ip_mask->proto;
+ u8 sproto = ip_spec->proto;
+
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
+ return false;
+ return true;
+}
+
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
+{
+ u8 mproto = ip_mask->l4_proto;
+ u8 sproto = ip_spec->l4_proto;
+
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
+ return false;
+ return true;
+}
+
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct bnxt_ntuple_filter *new_fltr, *fltr;
+ u32 flow_type = fs->flow_type & 0xff;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_flow_masks *fmasks;
+ struct flow_keys *fkeys;
+ u32 idx, ring;
+ int rc;
+ u8 vf;
+
+ if (!bp->vnic_info)
+ return -EAGAIN;
+
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ return -EOPNOTSUPP;
+
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
+ fkeys = &new_fltr->fkeys;
+
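+ /* Translate the ethtool spec/mask pair into flow_keys plus masks */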
+ rc = -EOPNOTSUPP;
+ switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
+ : BNXT_IP_PROTO_WILDCARD;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V4_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
+
+ fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
+ : BNXT_IP_PROTO_WILDCARD;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ break;
+ }
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V6_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto ntuple_err;
+ }
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
+ goto ntuple_err;
+
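+ /* Hash the flow keys and reject the request if an identical
+  * filter already exists.
+  */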
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+ rcu_read_lock();
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rcu_read_unlock();
+ rc = -EEXIST;
+ goto ntuple_err;
+ }
+ rcu_read_unlock();
+
+ new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->flow_type & FLOW_RSS) {
+ struct bnxt_rss_ctx *rss_ctx;
+
+ new_fltr->base.fw_vnic_id = 0;
+ new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
+ if (rss_ctx) {
+ new_fltr->base.fw_vnic_id = rss_ctx->index;
+ } else {
+ rc = -EINVAL;
+ goto ntuple_err;
+ }
+ }
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
+ __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+ if (rc) {
+ bnxt_del_ntp_filter(bp, new_fltr);
+ return rc;
+ }
+ fs->location = new_fltr->base.sw_id;
+ return 0;
+ }
+
+ntuple_err:
+ atomic_dec(&l2_fltr->refcnt);
+ kfree(new_fltr);
+ return rc;
+}
+
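+/* ETHTOOL_SRXCLSRLINS entry point, reached via e.g.
+ * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 2"
+ */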
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ u32 ring, flow_type;
+ int rc;
+ u8 vf;
+
+ if (!netif_running(bp->dev))
+ return -EAGAIN;
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return -EPERM;
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
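+ /* Drop rules are only expressible as ntuple filters */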
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, cmd);
+
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ if (BNXT_VF(bp) && vf)
+ return -EINVAL;
+ if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+ return -EINVAL;
+ if (!vf && ring >= bp->rx_nr_rings)
+ return -EINVAL;
+
+ if (flow_type == ETHER_FLOW)
+ rc = bnxt_add_l2_cls_rule(bp, fs);
+ else
+ rc = bnxt_add_ntuple_cls_rule(bp, cmd);
+ return rc;
+}
+
+static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct bnxt_filter_base *fltr_base;
+ struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
+
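+ /* The cookie may name either an L2 or an ntuple filter; try the
+  * L2 table first, then the ntuple table.
+  */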
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE, id);
+ if (!fltr_base) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
+ if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
+ bnxt_del_ntp_filter(bp, fltr);
+ return 0;
+}
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
@@ -1078,11 +1590,16 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
return RXH_IP_SRC | RXH_IP_DST;
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
+ return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1096,8 +1613,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1115,8 +1638,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1129,26 +1658,36 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
- else if (cmd->data == RXH_2TUPLE)
+ else if (cmd->data == RXH_2TUPLE ||
+ cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
tuple = 2;
else if (!cmd->data)
tuple = 0;
else
return -EINVAL;
+ if (cmd->data & RXH_IP6_FL &&
+ !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
+ return -EINVAL;
+
if (cmd->flow_type == TCP_V4_FLOW) {
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
} else if (cmd->flow_type == UDP_V4_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
if (tuple == 4)
@@ -1158,11 +1697,29 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
} else if (cmd->flow_type == UDP_V6_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
@@ -1188,16 +1745,23 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
- if (tuple == 2)
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
+ if (!tuple)
+ break;
+ if (cmd->data & RXH_IP6_FL)
+ rss_hash_cfg |=
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
+ else if (tuple == 2)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
- else if (!tuple)
- rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
break;
}
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
@@ -1206,6 +1770,13 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
return rc;
}
+static u32 bnxt_get_rx_ring_count(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->rx_nr_rings;
+}
+
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1213,14 +1784,9 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
- case ETHTOOL_GRXRINGS:
- cmd->data = bp->rx_nr_rings;
- break;
-
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1230,11 +1796,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRULE:
rc = bnxt_grxclsrule(bp, cmd);
break;
-#endif
-
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
default:
rc = -EOPNOTSUPP;
@@ -1250,8 +1811,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
+ case ETHTOOL_SRXCLSRLINS:
+ rc = bnxt_srxclsrlins(bp, cmd);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ rc = bnxt_srxclsrldel(bp, cmd);
break;
default:
@@ -1265,8 +1830,9 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
@@ -1275,54 +1841,209 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
return HW_HASH_KEY_SIZE;
}
-static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnxt_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
+ struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
- if (indir && bp->rss_indir_tbl) {
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
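+ /* A non-zero rss_context selects an additional context's
+  * indirection table and key instead of the default VNIC's.
+  */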
+ if (rxfh->rss_context) {
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+ if (!ctx)
+ return -EINVAL;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+ }
+
+ if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = indir_tbl[i];
}
- if (key && vnic->rss_hash_key)
- memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ if (rxfh->key && vnic->rss_hash_key)
+ memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
-static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+ struct bnxt_rss_ctx *rss_ctx,
+ const struct ethtool_rxfh_param *rxfh)
{
- struct bnxt *bp = netdev_priv(dev);
- int rc = 0;
+ if (rxfh->key) {
+ if (rss_ctx) {
+ memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
+ HW_HASH_KEY_SIZE);
+ } else {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+ }
+ if (rxfh->indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
- if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ if (rss_ctx)
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < tbl_size; i++)
+ indir_tbl[i] = rxfh->indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
+ }
+}
+
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
return -EOPNOTSUPP;
+ }
- if (key)
+ if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
return -EOPNOTSUPP;
+ }
- if (indir) {
- u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+ if (!netif_running(bp->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
+ return -EAGAIN;
+ }
- for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = indir[i];
- pad = bp->rss_indir_tbl_entries - tbl_size;
- if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ return 0;
+}
+
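+/* Create an additional RSS context, reached via e.g.
+ * "ethtool -X eth0 context new equal 4"
+ */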
+static int bnxt_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+ BNXT_MAX_ETH_RSS_CTX);
+ return -EINVAL;
+ }
+
+ if (!bnxt_rfs_capable(bp, true)) {
+ NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
+ return -ENOMEM;
}
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bp->num_rss_ctx++;
+
+ vnic = &rss_ctx->vnic;
+ vnic->rss_ctx = ctx;
+ vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+ vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+ rc = bnxt_alloc_vnic_rss_table(bp, vnic);
+ if (rc)
+ goto out;
+
+ /* Populate defaults in the context */
+ bnxt_set_dflt_rss_indir_tbl(bp, ctx);
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+ memcpy(ethtool_rxfh_context_key(ctx),
+ bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+ goto out;
+ }
+
+ rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ rc = __bnxt_setup_vnic_p5(bp, vnic);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+
+ rss_ctx->index = rxfh->rss_context;
+ return 0;
+out:
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return rc;
+}
+
+static int bnxt_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+}
+
+static int bnxt_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ bnxt_modify_rss(bp, NULL, NULL, rxfh);
+
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1335,9 +2056,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -1349,30 +2070,56 @@ static void bnxt_get_drvinfo(struct net_device *dev,
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int reg_len;
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- reg_len = BNXT_PXP_REG_LEN;
+ return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
+}
+
+static void *
+__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
+{
+ struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
- if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
- reg_len += sizeof(struct pcie_ctx_hw_stats);
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats)
+ return NULL;
- return reg_len;
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
+
+ return rc ? NULL : hw_pcie_stats;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats_v2, start),\
+ offsetof(struct pcie_ctx_hw_stats_v2, end) }
+
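+/* Byte ranges within struct pcie_ctx_hw_stats_v2 that hold 32-bit
+ * counters; everything outside these ranges is a 64-bit field.
+ */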
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+ BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
+ BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
- struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_output *resp;
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
- dma_addr_t hw_pcie_stats_addr;
- int rc;
+ u8 *src;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -1380,25 +2127,37 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
- hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
- &hw_pcie_stats_addr);
- if (!hw_pcie_stats) {
- hwrm_req_drop(bp, req);
- return;
- }
-
- regs->version = 1;
- hwrm_req_hold(bp, req); /* hold on to slice */
- req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
- req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
- rc = hwrm_req_send(bp, req);
- if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
+ resp = hwrm_req_hold(bp, req);
+ src = __bnxt_hwrm_pcie_qstats(bp, req);
+ if (src) {
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ int i, j, len;
+
+ len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
+ if (len <= sizeof(struct pcie_ctx_hw_stats))
+ regs->version = 1;
+ else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
+ regs->version = 2;
+ else
+ regs->version = 3;
+
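+ /* Swap to host order: 4 bytes at a time inside the 32-bit
+  * ranges above, 8 bytes at a time everywhere else.
+  */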
+ for (i = 0, j = 0; i < len; ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
@@ -1442,121 +2201,586 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
-}
-
-#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
-{ \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 1000baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 10000baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 25000baseCR_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 40000baseCR4_Full);\
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 50000baseCR2_Full);\
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100000baseCR4_Full);\
- if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- Pause); \
- if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
- ethtool_link_ksettings_add_link_mode( \
- lk_ksettings, name, Asym_Pause);\
- } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- Asym_Pause); \
- } \
-}
-
-#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100baseT_Full) || \
- ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100baseT_Half)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 1000baseT_Full) || \
- ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 1000baseT_Half)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 10000baseT_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 25000baseCR_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 40000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 50000baseCR2_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
-}
-
-#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 50000baseCR_Full); \
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100000baseCR2_Full);\
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 200000baseCR4_Full);\
-}
-
-#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 50000baseCR_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100000baseCR2_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 200000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
+}
+
+enum bnxt_media_type {
+ BNXT_MEDIA_UNKNOWN = 0,
+ BNXT_MEDIA_TP,
+ BNXT_MEDIA_CR,
+ BNXT_MEDIA_SR,
+ BNXT_MEDIA_LR_ER_FR,
+ BNXT_MEDIA_KR,
+ BNXT_MEDIA_KX,
+ BNXT_MEDIA_X,
+ __BNXT_MEDIA_END,
+};
+
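+/* Map firmware PHY types onto the coarser media buckets above */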
+static const enum bnxt_media_type bnxt_phy_types[] = {
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+};
+
+static enum bnxt_media_type
+bnxt_get_media(struct bnxt_link_info *link_info)
+{
+ switch (link_info->media_type) {
+ case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
+ return BNXT_MEDIA_TP;
+ case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
+ return BNXT_MEDIA_CR;
+ default:
+ if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
+ return bnxt_phy_types[link_info->phy_type];
+ return BNXT_MEDIA_UNKNOWN;
+ }
+}
+
+enum bnxt_link_speed_indices {
+ BNXT_LINK_SPEED_UNKNOWN = 0,
+ BNXT_LINK_SPEED_100MB_IDX,
+ BNXT_LINK_SPEED_1GB_IDX,
+ BNXT_LINK_SPEED_10GB_IDX,
+ BNXT_LINK_SPEED_25GB_IDX,
+ BNXT_LINK_SPEED_40GB_IDX,
+ BNXT_LINK_SPEED_50GB_IDX,
+ BNXT_LINK_SPEED_100GB_IDX,
+ BNXT_LINK_SPEED_200GB_IDX,
+ BNXT_LINK_SPEED_400GB_IDX,
+ __BNXT_LINK_SPEED_END
+};
+
+static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
+{
+ switch (speed) {
+ case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
+ case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
+ case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
+ case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
+ case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
+ case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ return BNXT_LINK_SPEED_50GB_IDX;
+ case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ return BNXT_LINK_SPEED_100GB_IDX;
+ case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return BNXT_LINK_SPEED_400GB_IDX;
+ default: return BNXT_LINK_SPEED_UNKNOWN;
+ }
+}
+
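+/* speed x signal mode x media -> ethtool link mode; a zero entry
+ * means the combination is not supported.
+ */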
+static const enum ethtool_link_mode_bit_indices
+bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
+ [BNXT_LINK_SPEED_100MB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_1GB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ /* historically baseT, but DAC is more correctly baseX */
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_10GB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_25GB_IDX] = {
+ {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_40GB_IDX] = {
+ {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_50GB_IDX] = {
+ [BNXT_SIG_MODE_NRZ] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_100GB_IDX] = {
+ [BNXT_SIG_MODE_NRZ] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_200GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_400GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ },
+ },
+};
+
+#define BNXT_LINK_MODE_UNKNOWN -1
+
+static enum ethtool_link_mode_bit_indices
+bnxt_get_link_mode(struct bnxt_link_info *link_info)
+{
+ enum ethtool_link_mode_bit_indices link_mode;
+ enum bnxt_link_speed_indices speed;
+ enum bnxt_media_type media;
+ u8 sig_mode;
+
+ if (link_info->phy_link_status != BNXT_LINK_LINK)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ media = bnxt_get_media(link_info);
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ speed = bnxt_fw_speed_idx(link_info->link_speed);
+ sig_mode = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
+ } else {
+ speed = bnxt_fw_speed_idx(link_info->req_link_speed);
+ sig_mode = link_info->req_signal_mode;
+ }
+ if (sig_mode >= BNXT_SIG_MODE_MAX)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
+ * link mode, but since no such devices exist, the zeroes in the
+ * map can be conveniently used to represent unknown link modes.
+ */
+ link_mode = bnxt_link_modes[speed][sig_mode][media];
+ if (!link_mode)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ switch (link_mode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
+ break;
+ default:
+ break;
+ }
+
+ return link_mode;
+}
+
+static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.supported);
+ }
+
+ if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
+ link_info->support_pam4_auto_speeds)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lk_ksettings->link_modes.supported);
+
+ if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ return;
+
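+ /* Asym_Pause is advertised when exactly one pause direction is on */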
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.lp_advertising);
+ if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.lp_advertising);
+}
+
+static const u16 bnxt_nrz_speed_masks[] = {
+ [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speed_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_nrz_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
+};
+
+static const u16 bnxt_pam4_112_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
+};
+
+static enum bnxt_link_speed_indices
+bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
+{
+ const u16 *speeds;
+ int idx, len;
+
+ switch (sig_mode) {
+ case BNXT_SIG_MODE_NRZ:
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_nrz_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
+ } else {
+ speeds = bnxt_nrz_speed_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ }
+ break;
+ case BNXT_SIG_MODE_PAM4:
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_pam4_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
+ } else {
+ speeds = bnxt_pam4_speed_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ }
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ speeds = bnxt_pam4_112_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
+ break;
+ default:
+ return BNXT_LINK_SPEED_UNKNOWN;
+ }
+
+ for (idx = 0; idx < len; idx++) {
+ if (speeds[idx] == speed_msk)
+ return idx;
+ }
+
+ return BNXT_LINK_SPEED_UNKNOWN;
+}
+
+#define BNXT_FW_SPEED_MSK_BITS 16
+
+static void
+__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
+{
+ enum ethtool_link_mode_bit_indices link_mode;
+ enum bnxt_link_speed_indices speed;
+ u8 bit;
+
+ for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
+ speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
+ if (!speed)
+ continue;
+
+ link_mode = bnxt_link_modes[speed][sig_mode][media];
+ if (!link_mode)
+ continue;
+
+ linkmode_set_bit(link_mode, et_mask);
+ }
+}
+
+static void
+bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
+{
+ if (media) {
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
+ return;
+ }
+
+ /* list speeds for all media if unknown */
+ for (media = 1; media < __BNXT_MEDIA_END; media++)
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
+}
+
+static void
+bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_nrz = link_info->support_speeds2;
+ sp_pam4 = link_info->support_speeds2;
+ sp_pam4_112 = link_info->support_speeds2;
+ } else {
+ sp_nrz = link_info->support_speeds;
+ sp_pam4 = link_info->support_pam4_speeds;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.supported);
+}
+
+static void
+bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ sp_nrz = link_info->advertising;
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_pam4 = link_info->advertising;
+ sp_pam4_112 = link_info->advertising;
+ } else {
+ sp_pam4 = link_info->advertising_pam4;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.advertising);
+}
+
+static void
+bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 phy_flags = bp->phy_flags;
+
+ bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
+ BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
+ bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
+ BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
+}
+
+static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
+ u16 speed_msk, const unsigned long *et_mask,
+ enum ethtool_link_mode_bit_indices mode)
+{
+ bool mode_desired = linkmode_test_bit(mode, et_mask);
+
+ if (!mode)
+ return;
+
+ /* enabled speeds for installed media should override */
+ if (installed_media && mode_desired) {
+ *speeds |= speed_msk;
+ *delta |= speed_msk;
+ return;
+ }
+
+ /* many-to-one mapping; only allow one change per fw_speed bit */
+ if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
+ *speeds ^= speed_msk;
+ *delta |= speed_msk;
+ }
+}
+
+static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
+ const unsigned long *et_mask)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
+ enum bnxt_media_type media = bnxt_get_media(link_info);
+ u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
+ u32 delta_pam4_112 = 0;
+ u32 delta_pam4 = 0;
+ u32 delta_nrz = 0;
+ int i, m;
+
+ adv = &link_info->advertising;
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ adv_pam4 = &link_info->advertising;
+ adv_pam4_112 = &link_info->advertising;
+ sp_msks = bnxt_nrz_speeds2_masks;
+ sp_pam4_msks = bnxt_pam4_speeds2_masks;
+ sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
+ } else {
+ adv_pam4 = &link_info->advertising_pam4;
+ sp_msks = bnxt_nrz_speed_masks;
+ sp_pam4_msks = bnxt_pam4_speed_masks;
+ }
+ for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
+ /* accept any legal media from user */
+ for (m = 1; m < __BNXT_MEDIA_END; m++) {
+ bnxt_update_speed(&delta_nrz, m == media,
+ adv, sp_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
+ bnxt_update_speed(&delta_pam4, m == media,
+ adv_pam4, sp_pam4_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
+ if (!adv_pam4_112)
+ continue;
+
+ bnxt_update_speed(&delta_pam4_112, m == media,
+ adv_pam4_112, sp_pam4_112_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
+ }
+ }
}
static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
@@ -1580,36 +2804,6 @@ static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.advertising);
}
-static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- u16 fw_speeds = link_info->advertising;
- u8 fw_pause = 0;
-
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- fw_pause = link_info->auto_pause_setting;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
- fw_speeds = link_info->advertising_pam4;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
- bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
-}
-
-static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- u16 fw_speeds = link_info->lp_auto_link_speeds;
- u8 fw_pause = 0;
-
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- fw_pause = link_info->lp_pause;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
- lp_advertising);
- fw_speeds = link_info->lp_auto_pam4_link_speeds;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
-}
-
static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
@@ -1631,26 +2825,6 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.supported);
}
-static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- u16 fw_speeds = link_info->support_speeds;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
- fw_speeds = link_info->support_pam4_speeds;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
-
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
-
- if (link_info->support_auto_speeds ||
- link_info->support_pam4_auto_speeds)
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Autoneg);
- bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
-}
-
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
switch (fw_link_speed) {
@@ -1669,68 +2843,105 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
case BNXT_LINK_SPEED_40GB:
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
return SPEED_100000;
+ case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ return SPEED_200000;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return SPEED_400000;
default:
return SPEED_UNKNOWN;
}
}
+static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
+ struct bnxt_link_info *link_info)
+{
+ struct ethtool_link_settings *base = &lk_ksettings->base;
+
+ if (link_info->link_state == BNXT_LINK_STATE_UP) {
+ base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ base->duplex = DUPLEX_HALF;
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ lk_ksettings->lanes = link_info->active_lanes;
+ } else if (!link_info->autoneg) {
+ base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+ base->duplex = DUPLEX_HALF;
+ if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ }
+}
+
static int bnxt_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *lk_ksettings)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_link_info *link_info = &bp->link_info;
struct ethtool_link_settings *base = &lk_ksettings->base;
- u32 ethtool_speed;
+ enum ethtool_link_mode_bit_indices link_mode;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ enum bnxt_media_type media;
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ base->duplex = DUPLEX_UNKNOWN;
+ base->speed = SPEED_UNKNOWN;
+ link_info = &bp->link_info;
+
mutex_lock(&bp->link_lock);
- bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ bnxt_get_ethtool_modes(link_info, lk_ksettings);
+ media = bnxt_get_media(link_info);
+ bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
+ bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
+ link_mode = bnxt_get_link_mode(link_info);
+ if (link_mode != BNXT_LINK_MODE_UNKNOWN)
+ ethtool_params_from_link_mode(lk_ksettings, link_mode);
+ else
+ bnxt_get_default_speeds(lk_ksettings, link_info);
- ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
if (link_info->autoneg) {
- bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
- ethtool_link_ksettings_add_link_mode(lk_ksettings,
- advertising, Autoneg);
+ bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lk_ksettings->link_modes.advertising);
base->autoneg = AUTONEG_ENABLE;
- base->duplex = DUPLEX_UNKNOWN;
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
- if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
- else
- base->duplex = DUPLEX_HALF;
- }
- ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ bnxt_get_all_ethtool_lp_speeds(link_info, media,
+ lk_ksettings);
} else {
base->autoneg = AUTONEG_DISABLE;
- ethtool_speed =
- bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
- base->duplex = DUPLEX_HALF;
- if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
}
- base->speed = ethtool_speed;
base->port = PORT_NONE;
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ if (media == BNXT_MEDIA_TP) {
base->port = PORT_TP;
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- TP);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
- TP);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ lk_ksettings->link_modes.advertising);
+ } else if (media == BNXT_MEDIA_KR) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.advertising);
} else {
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- FIBRE);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
- FIBRE);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ lk_ksettings->link_modes.advertising);
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ if (media == BNXT_MEDIA_CR)
base->port = PORT_DA;
- else if (link_info->media_type ==
- PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ else
base->port = PORT_FIBRE;
}
base->phy_address = link_info->phy_addr;
@@ -1739,13 +2950,16 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
+static int
+bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
u16 support_pam4_spds = link_info->support_pam4_speeds;
+ u16 support_spds2 = link_info->support_speeds2;
u16 support_spds = link_info->support_speeds;
u8 sig_mode = BNXT_SIG_MODE_NRZ;
+ u32 lanes_needed = 1;
u16 fw_speed = 0;
switch (ethtool_speed) {
@@ -1754,7 +2968,8 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
- if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
@@ -1762,41 +2977,88 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
- if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
- if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+ if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
+ lanes_needed = 2;
+ }
break;
case SPEED_25000:
- if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
- if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ lanes_needed = 4;
+ }
break;
case SPEED_50000:
- if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
+ if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
+ lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ lanes_needed = 2;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
+ fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
}
break;
case SPEED_100000:
- if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
+ if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
+ lanes != 2 && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ lanes_needed = 4;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
+ lanes != 1) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
}
break;
case SPEED_200000:
if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
+ lanes != 2) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 2;
+ }
+ break;
+ case SPEED_400000:
+ if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
+ lanes != 4) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 8;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 4;
}
break;
}
@@ -1806,6 +3068,11 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
return -EINVAL;
}
+ if (lanes && lanes != lanes_needed) {
+ netdev_err(dev, "unsupported number of lanes for speed\n");
+ return -EINVAL;
+ }
+
if (link_info->req_link_speed == fw_speed &&
link_info->req_signal_mode == sig_mode &&
link_info->autoneg == 0)
@@ -1821,23 +3088,22 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
return 0;
}
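/*
 * Editor's note (illustrative sketch, not part of the patch): the new
 * "lanes" argument disambiguates link modes that share a speed.  50G,
 * for example, can be 2x25G NRZ or 1x50G PAM4; a caller such as
 * "ethtool -s eth0 speed 50000 lanes 1 autoneg off" picks the PAM4
 * variant.  The standalone model below mirrors that selection and the
 * trailing lanes_needed check; the mask names and values are stand-ins
 * for the driver's constants, not the real ones.
 */
#include <stdio.h>

#define SUP_50GB_NRZ	0x1	/* stand-in for BNXT_LINK_SPEED_MSK_50GB */
#define SUP_50GB_PAM4	0x2	/* stand-in for BNXT_LINK_PAM4_SPEED_MSK_50GB */

enum sig_mode { SIG_NRZ, SIG_PAM4 };

static int pick_50g(unsigned int sup, unsigned int lanes,
		    enum sig_mode *sig, unsigned int *lanes_needed)
{
	if ((sup & SUP_50GB_NRZ) && lanes != 1) {
		*sig = SIG_NRZ;
		*lanes_needed = 2;		/* 2x25G NRZ */
	} else if (sup & SUP_50GB_PAM4) {
		*sig = SIG_PAM4;
		*lanes_needed = 1;		/* 1x50G PAM4 */
	} else {
		return -1;			/* speed not supported */
	}
	if (lanes && lanes != *lanes_needed)
		return -1;			/* mirrors the -EINVAL lanes check */
	return 0;
}

int main(void)
{
	enum sig_mode sig;
	unsigned int needed;

	/* lanes == 1 steers to PAM4 even though NRZ 50G is also supported */
	if (!pick_50g(SUP_50GB_NRZ | SUP_50GB_PAM4, 1, &sig, &needed))
		printf("sig=%s lanes=%u\n",
		       sig == SIG_PAM4 ? "PAM4" : "NRZ", needed);
	return 0;
}
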
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
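/*
 * Editor's note (illustrative sketch, not part of the patch): the legacy
 * u32 ADVERTISED_* mask ran out of bits, which is why the function above
 * now takes a linkmode bitmap.  Below is a minimal userspace model of the
 * linkmode_test_bit() pattern; the bit numbers and firmware mask values
 * are illustrative, not the kernel's definitions.
 */
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BIT_100BASET_FULL	0
#define BIT_10000BASET_FULL	37	/* deliberately above bit 31 */
#define FW_MSK_100MB		0x1
#define FW_MSK_10GB		0x2

static int test_bit(unsigned int nr, const unsigned long *mode)
{
	return (mode[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long mode[2] = { 0 };
	unsigned int fw_mask = 0;

	mode[BIT_100BASET_FULL / BITS_PER_LONG] |=
		1UL << (BIT_100BASET_FULL % BITS_PER_LONG);
	mode[BIT_10000BASET_FULL / BITS_PER_LONG] |=
		1UL << (BIT_10000BASET_FULL % BITS_PER_LONG);

	if (test_bit(BIT_100BASET_FULL, mode))
		fw_mask |= FW_MSK_100MB;
	if (test_bit(BIT_10000BASET_FULL, mode))
		fw_mask |= FW_MSK_10GB;
	printf("fw speed mask: 0x%x\n", fw_mask);	/* prints 0x3 */
	return 0;
}
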
@@ -1850,7 +3116,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
- u32 speed;
+ u32 speed, lanes = 0;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -1858,12 +3124,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
- link_info->advertising = 0;
- link_info->advertising_pam4 = 0;
- BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
- advertising);
- BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
- lk_ksettings, advertising);
+ bnxt_set_ethtool_speeds(link_info,
+ lk_ksettings->link_modes.advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
if (!link_info->advertising && !link_info->advertising_pam4) {
link_info->advertising = link_info->support_auto_speeds;
@@ -1873,7 +3135,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
u8 phy_type = link_info->phy_type;
@@ -1890,7 +3153,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
goto set_setting_exit;
}
speed = base->speed;
- rc = bnxt_force_link_speed(dev, speed);
+ lanes = lk_ksettings->lanes;
+ rc = bnxt_force_link_speed(dev, speed, lanes);
if (rc) {
if (rc == -EALREADY)
rc = 0;
@@ -1945,12 +3209,16 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
static void bnxt_get_fec_stats(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct bnxt *bp = netdev_priv(dev);
u64 *rx;
@@ -1961,6 +3229,14 @@ static void bnxt_get_fec_stats(struct net_device *dev,
rx = bp->rx_port_stats_ext.sw_stats;
fec_stats->corrected_bits.total =
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
+
+ if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
+ return;
+
+ fec_stats->corrected_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
+ fec_stats->uncorrectable_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
}
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
@@ -2062,7 +3338,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2073,9 +3349,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -2104,7 +3378,7 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
}
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
@@ -2134,14 +3408,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2167,7 +3441,7 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
req->host_src_addr = cpu_to_le64(dma_handle);
}
- hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->dir_type = cpu_to_le16(dir_type);
req->dir_ordinal = cpu_to_le16(dir_ordinal);
req->dir_ext = cpu_to_le16(dir_ext);
@@ -2180,13 +3454,18 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
return rc;
}
-static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
- u8 self_reset, u8 flags)
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input *req;
int rc;
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver\n");

+ return -EPERM;
+ }
+
rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
if (rc)
return rc;
@@ -2460,12 +3739,92 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
+#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
+#define MSG_INVALID_PKG "PKG install error : Invalid package"
+#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
+#define MSG_INVALID_DEV "PKG install error : Invalid device"
+#define MSG_INTERNAL_ERR "PKG install error : Internal error"
+#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
+#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
+#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
+#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
+
+static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
+ struct netlink_ext_ack *extack)
+{
+ switch (result) {
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
+ return -EINVAL;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
+ return -ENOPKG;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
+ return -EPERM;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
+ return -EOPNOTSUPP;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
+ return -EIO;
+ }
+}
+
#define BNXT_PKG_DMA_SIZE 0x40000
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
+ struct netlink_ext_ack *extack)
+{
+ u32 item_len;
+ int rc;
+
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
+ &item_len, NULL);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
+ return rc;
+ }
+
+ if (fw_size > item_len) {
+ rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, 0, 1,
+ round_up(fw_size, 4096), NULL, 0);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
+ return rc;
+ }
+ }
+ return 0;
+}
+
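/*
 * Editor's note (illustrative sketch, not part of the patch): the resize
 * path above grows the UPDATE entry to the next 4 KiB boundary before
 * flashing a larger image.  For a power-of-two boundary, the kernel's
 * round_up() is just an add-and-mask:
 */
#include <stdio.h>

static unsigned long round_up_pow2(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);	/* align must be a power of two */
}

int main(void)
{
	printf("%lu\n", round_up_pow2(1000000, 4096));	/* prints 1003520 */
	return 0;
}
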
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
struct hwrm_nvm_install_update_input *install;
struct hwrm_nvm_install_update_output *resp;
@@ -2476,9 +3835,15 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
+ u8 cmd_err;
u16 index;
int rc;
+ /* resize before flashing larger image than available space */
+ rc = bnxt_resize_update_entry(dev, fw->size, extack);
+ if (rc)
+ return rc;
+
bnxt_hwrm_fw_set_time(bp);
rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
@@ -2508,8 +3873,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
return rc;
}
- hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
- hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
+ hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
+ hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
hwrm_req_hold(bp, modify);
modify->host_src_addr = cpu_to_le64(dma_handle);
@@ -2527,12 +3892,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_EXT_NONE,
&index, &item_len, NULL);
if (rc) {
- netdev_err(dev, "PKG update area not created in nvram\n");
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
break;
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
- (unsigned long)fw->size);
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
rc = -EFBIG;
break;
}
@@ -2559,6 +3923,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
if (defrag_attempted) {
/* We have tried to defragment already in the previous
@@ -2567,15 +3933,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
@@ -2585,11 +3960,12 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL, 0);
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ if (!rc)
+ break;
}
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ fallthrough;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
}
} while (defrag_attempted && !rc);
@@ -2600,7 +3976,7 @@ pkg_abort:
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- rc = -ENOPKG;
+ rc = nvm_update_err_to_stderr(dev, resp->result, extack);
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
@@ -2608,7 +3984,7 @@ pkg_abort:
}
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
const struct firmware *fw;
int rc;
@@ -2620,7 +3996,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, const char *file
return rc;
}
- rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
release_firmware(fw);
@@ -2638,7 +4014,7 @@ static int bnxt_flash_device(struct net_device *dev,
if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
flash->region > 0xffff)
return bnxt_flash_package_from_file(dev, flash->data,
- flash->region);
+ flash->region, NULL);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -2708,7 +4084,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
if (rc)
return rc;
- buflen = dir_entries * entry_length;
+ buflen = mul_u32_u32(dir_entries, entry_length);
buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
if (!buf) {
hwrm_req_drop(bp, req);
@@ -2724,8 +4100,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2759,9 +4135,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
@@ -2825,39 +4201,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static void bnxt_get_pkgver(struct net_device *dev)
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
- int len;
+ int rc;
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &pkglen) != 0)
- return;
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &pkglen);
+ if (rc)
+ return rc;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
- return;
+ return -ENOMEM;
}
- if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
+ if (rc)
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
- len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
- }
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ strscpy(ver, pkgver, size);
+ else
+ rc = -ENOENT;
+
err:
kfree(pkgbuf);
+
+ return rc;
+}
+
+static void bnxt_get_pkgver(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ char buf[FW_VER_STR_LEN - 5];
+ int len;
+
+ if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
+ len = strlen(bp->fw_ver_str);
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
+ "/pkg %s", buf);
+ }
}
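/*
 * Editor's note (illustrative sketch, not part of the patch): the scratch
 * buffer above is sized FW_VER_STR_LEN - 5 so that the five-character
 * "/pkg " prefix plus the package version can never overflow the firmware
 * version string.  A standalone model of the append:
 */
#include <stdio.h>
#include <string.h>

#define FW_VER_STR_LEN 32	/* illustrative; the driver's is larger */

int main(void)
{
	char fw_ver_str[FW_VER_STR_LEN] = "1.10.2.30";
	char buf[FW_VER_STR_LEN - 5] = "221.0.117.0";	/* parsed pkg version */
	int len = strlen(fw_ver_str);

	snprintf(fw_ver_str + len, FW_VER_STR_LEN - len, "/pkg %s", buf);
	puts(fw_ver_str);	/* 1.10.2.30/pkg 221.0.117.0 */
	return 0;
}
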
static int bnxt_get_eeprom(struct net_device *dev,
@@ -2936,12 +4329,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -2951,7 +4345,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -2971,16 +4365,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -2994,7 +4387,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3006,19 +4399,102 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
+
+ return 0;
+}
+
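/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * ethtool_keee conversion swaps u32 masks for linkmode bitmaps, so the
 * "subset of autoneg advertisement" test above becomes linkmode_andnot(),
 * which computes dst = a & ~b and reports whether any bit survived.
 * A single-word model:
 */
#include <stdbool.h>
#include <stdio.h>

static bool andnot(unsigned long *dst, unsigned long adv, unsigned long limit)
{
	*dst = adv & ~limit;	/* bits requested but not autoneg-advertised */
	return *dst != 0;	/* a non-empty result means "reject" */
}

int main(void)
{
	unsigned long tmp;
	unsigned long autoneg_adv = 0x0f;	/* assumed advertised modes */

	printf("0x10 ok? %s\n", andnot(&tmp, 0x10, autoneg_adv) ? "no" : "yes");
	printf("0x03 ok? %s\n", andnot(&tmp, 0x03, autoneg_adv) ? "no" : "yes");
	return 0;
}
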
+static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
+{
+ struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *val = le16_to_cpu(resp->pfcwd_timeout_value);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
+{
+ struct hwrm_queue_pfcwd_timeout_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
+ if (rc)
+ return rc;
+ req->pfcwd_timeout_value = cpu_to_le16(val);
+ rc = hwrm_req_send(bp, req);
+ return rc;
+}
+
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak, val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+
+ val = *(u16 *)data;
+ if (val > bp->max_pfcwd_tmo_ms &&
+ val != PFC_STORM_PREVENTION_AUTO)
+ return -EINVAL;
+ return bnxt_hwrm_pfcwd_cfg(bp, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (!bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+ return bnxt_hwrm_pfcwd_qcfg(bp, data);
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
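/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * tunables above are reachable from userspace with
 * "ethtool --get-tunable eth0 rx-copybreak" or, equivalently, the legacy
 * SIOCETHTOOL ioctl shown below.  The interface name is an assumption
 * and error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	unsigned long long storage[3] = { 0 };	/* aligned: 16B header + u32 */
	struct ethtool_tunable *tuna = (struct ethtool_tunable *)storage;
	struct ifreq ifr;
	int fd;

	tuna->cmd = ETHTOOL_GTUNABLE;
	tuna->id = ETHTOOL_RX_COPYBREAK;
	tuna->type_id = ETHTOOL_TUNABLE_U32;
	tuna->len = sizeof(__u32);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed NIC name */
	ifr.ifr_data = (char *)tuna;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GTUNABLE");
		return 1;
	}
	printf("rx-copybreak: %u bytes\n", *(__u32 *)(tuna + 1));
	close(fd);
	return 0;
}
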
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
- u16 page_number, u16 start_addr,
- u16 data_length, u8 *buf)
+ u16 page_number, u8 bank,
+ u16 start_addr, u16 data_length,
+ u8 *buf)
{
struct hwrm_port_phy_i2c_read_output *output;
struct hwrm_port_phy_i2c_read_input *req;
@@ -3039,8 +4515,13 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
data_length -= xfer_size;
req->page_offset = cpu_to_le16(start_addr + byte_offset);
req->data_length = xfer_size;
- req->enables = cpu_to_le32(start_addr + byte_offset ?
- PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ req->enables =
+ cpu_to_le32((start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
+ 0) |
+ (bank ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
+ 0));
rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
@@ -3058,6 +4539,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -3070,7 +4554,7 @@ static int bnxt_get_module_info(struct net_device *dev,
if (bp->hwrm_spec_code < 0x10202)
return -EOPNOTSUPP;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
SFF_DIAG_SUPPORT_OFFSET + 1,
data);
if (!rc) {
@@ -3109,13 +4593,16 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
if (start < ETH_MODULE_SFF_8436_LEN) {
if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
length = ETH_MODULE_SFF_8436_LEN - start;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
start, length, data);
if (rc)
return rc;
@@ -3127,12 +4614,148 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
start, length, data);
}
return rc;
}
+static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ if (bp->link_info.module_status <=
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return 0;
+
+ if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+	    bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE) {
+ NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
+ return -EOPNOTSUPP;
+ }
+ switch (bp->link_info.module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+ break;
+ }
+ return -EINVAL;
+}
+
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read/write not permitted on untrusted VF");
+ return -EPERM;
+ }
+
+ rc = bnxt_get_module_status(bp, extack);
+ if (rc)
+ return rc;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
+ return -EINVAL;
+ }
+
+ if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
+		NL_SET_ERR_MSG_MOD(extack, "Firmware does not support bank selection");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
+ page_data->page, page_data->bank,
+ page_data->offset,
+ page_data->length,
+ page_data->data);
+ if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Module's eeprom read failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
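/*
 * Editor's note (illustrative sketch, not part of the patch): the write
 * loop above pushes an arbitrary-length page through a fixed-size
 * firmware window, mirroring the read side.  MAX_XFER stands in for
 * BNXT_MAX_PHY_I2C_RESP_SIZE; send_chunk() stands in for the HWRM send.
 */
#include <stdio.h>

#define MAX_XFER 64
#define PAGE_LEN 150

static int send_chunk(unsigned int offset, unsigned int len)
{
	printf("write %u bytes at offset %u\n", len, offset);
	return 0;	/* pretend the firmware accepted the chunk */
}

int main(void)
{
	unsigned int written = 0;

	while (written < PAGE_LEN) {
		unsigned int xfer = PAGE_LEN - written;

		if (xfer > MAX_XFER)
			xfer = MAX_XFER;	/* clamp to the window size */
		if (send_chunk(written, xfer))
			break;			/* bail out like the driver's rc check */
		written += xfer;
	}
	return 0;
}
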
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -3274,7 +4897,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
@@ -3377,7 +5000,8 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
+ TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -3401,14 +5025,15 @@ static int bnxt_run_loopback(struct bnxt *bp)
int rc;
cpr = &rxr->bnapi->cp_ring;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ cpr = rxr->rx_cpr;
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3421,7 +5046,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
@@ -3472,7 +5097,15 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+
memset(buf, 0, sizeof(u64) * bp->num_tests);
+ if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ bnxt_ulp_registered(bp->edev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
+ return;
+ }
+
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
@@ -3502,44 +5135,51 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
- return;
+ bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -3590,7 +5230,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
}
}
- if (req & BNXT_FW_RESET_AP) {
+ if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code >= 0x10803) {
if (!bnxt_firmware_reset_ap(dev)) {
@@ -3609,349 +5249,26 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return 0;
}
-static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
- struct bnxt_hwrm_dbg_dma_info *info)
-{
- struct hwrm_dbg_cmn_input *cmn_req = msg;
- __le16 *seq_ptr = msg + info->seq_off;
- struct hwrm_dbg_cmn_output *cmn_resp;
- u16 seq = 0, len, segs_off;
- dma_addr_t dma_handle;
- void *dma_buf, *resp;
- int rc, off = 0;
-
- dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
- if (!dma_buf) {
- hwrm_req_drop(bp, msg);
- return -ENOMEM;
- }
-
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
- cmn_resp = hwrm_req_hold(bp, msg);
- resp = cmn_resp;
-
- segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
- total_segments);
- cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
- cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
- while (1) {
- *seq_ptr = cpu_to_le16(seq);
- rc = hwrm_req_send(bp, msg);
- if (rc)
- break;
-
- len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
- if (!seq &&
- cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
- info->segs = le16_to_cpu(*((__le16 *)(resp +
- segs_off)));
- if (!info->segs) {
- rc = -EIO;
- break;
- }
-
- info->dest_buf_size = info->segs *
- sizeof(struct coredump_segment_record);
- info->dest_buf = kmalloc(info->dest_buf_size,
- GFP_KERNEL);
- if (!info->dest_buf) {
- rc = -ENOMEM;
- break;
- }
- }
-
- if (info->dest_buf) {
- if ((info->seg_start + off + len) <=
- BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
- } else {
- rc = -ENOBUFS;
- break;
- }
- }
-
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
- if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
- break;
-
- seq++;
- off += len;
- }
- hwrm_req_drop(bp, msg);
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
- struct bnxt_coredump *coredump)
-{
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- struct hwrm_dbg_coredump_list_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
- if (rc)
- return rc;
-
- info.dma_len = COREDUMP_LIST_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
- data_len);
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc) {
- coredump->data = info.dest_buf;
- coredump->data_size = info.dest_buf_size;
- coredump->total_segs = info.segs;
- }
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
-{
- struct hwrm_dbg_coredump_initiate_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
- if (rc)
- return rc;
-
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- return hwrm_req_send(bp, req);
-}
-
-static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
- u16 segment_id, u32 *seg_len,
- void *buf, u32 buf_len, u32 offset)
-{
- struct hwrm_dbg_coredump_retrieve_input *req;
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
- if (rc)
- return rc;
-
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
- seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
- data_len);
- if (buf) {
- info.dest_buf = buf + offset;
- info.buf_len = buf_len;
- info.seg_start = offset;
- }
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc)
- *seg_len = info.dest_buf_size;
-
- return rc;
-}
-
-static void
-bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
- struct bnxt_coredump_segment_hdr *seg_hdr,
- struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
-{
- memset(seg_hdr, 0, sizeof(*seg_hdr));
- memcpy(seg_hdr->signature, "sEgM", 4);
- if (seg_rec) {
- seg_hdr->component_id = (__force __le32)seg_rec->component_id;
- seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
- seg_hdr->low_version = seg_rec->version_low;
- seg_hdr->high_version = seg_rec->version_hi;
- } else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
- }
- seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
- seg_hdr->length = cpu_to_le32(seg_len);
- seg_hdr->status = cpu_to_le32(status);
- seg_hdr->duration = cpu_to_le32(duration);
- seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
- seg_hdr->instance = cpu_to_le32(instance);
-}
-
-static void
-bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
- time64_t start, s16 start_utc, u16 total_segs,
- int status)
-{
- time64_t end = ktime_get_real_seconds();
- u32 os_ver_major = 0, os_ver_minor = 0;
- struct tm tm;
-
- time64_to_tm(start, 0, &tm);
- memset(record, 0, sizeof(*record));
- memcpy(record->signature, "cOrE", 4);
- record->flags = 0;
- record->low_version = 0;
- record->high_version = 1;
- record->asic_state = 0;
- strlcpy(record->system_name, utsname()->nodename,
- sizeof(record->system_name));
- record->year = cpu_to_le16(tm.tm_year + 1900);
- record->month = cpu_to_le16(tm.tm_mon + 1);
- record->day = cpu_to_le16(tm.tm_mday);
- record->hour = cpu_to_le16(tm.tm_hour);
- record->minute = cpu_to_le16(tm.tm_min);
- record->second = cpu_to_le16(tm.tm_sec);
- record->utc_bias = cpu_to_le16(start_utc);
- strcpy(record->commandline, "ethtool -w");
- record->total_segments = cpu_to_le32(total_segs);
-
- sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
- record->os_ver_major = cpu_to_le32(os_ver_major);
- record->os_ver_minor = cpu_to_le32(os_ver_minor);
-
- strlcpy(record->os_name, utsname()->sysname, 32);
- time64_to_tm(end, 0, &tm);
- record->end_year = cpu_to_le16(tm.tm_year + 1900);
- record->end_month = cpu_to_le16(tm.tm_mon + 1);
- record->end_day = cpu_to_le16(tm.tm_mday);
- record->end_hour = cpu_to_le16(tm.tm_hour);
- record->end_minute = cpu_to_le16(tm.tm_min);
- record->end_second = cpu_to_le16(tm.tm_sec);
- record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
- record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
- bp->ver_resp.chip_rev << 8 |
- bp->ver_resp.chip_metal);
- record->asic_id2 = 0;
- record->coredump_status = cpu_to_le32(status);
- record->ioctl_low_version = 0;
- record->ioctl_high_version = 0;
-}
-
-static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
-{
- u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
- u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
- struct coredump_segment_record *seg_record = NULL;
- struct bnxt_coredump_segment_hdr seg_hdr;
- struct bnxt_coredump coredump = {NULL};
- time64_t start_time;
- u16 start_utc;
- int rc = 0, i;
-
- if (buf)
- buf_len = *dump_len;
-
- start_time = ktime_get_real_seconds();
- start_utc = sys_tz.tz_minuteswest * 60;
- seg_hdr_len = sizeof(seg_hdr);
-
- /* First segment should be hwrm_ver_get response */
- *dump_len = seg_hdr_len + ver_get_resp_len;
- if (buf) {
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len;
- memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
- offset += ver_get_resp_len;
- }
-
- rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
- if (rc) {
- netdev_err(bp->dev, "Failed to get coredump segment list\n");
- goto err;
- }
-
- *dump_len += seg_hdr_len * coredump.total_segs;
-
- seg_record = (struct coredump_segment_record *)coredump.data;
- seg_record_len = sizeof(*seg_record);
-
- for (i = 0; i < coredump.total_segs; i++) {
- u16 comp_id = le16_to_cpu(seg_record->component_id);
- u16 seg_id = le16_to_cpu(seg_record->segment_id);
- u32 duration = 0, seg_len = 0;
- unsigned long start, end;
-
- if (buf && ((offset + seg_hdr_len) >
- BNXT_COREDUMP_BUF_LEN(buf_len))) {
- rc = -ENOBUFS;
- goto err;
- }
-
- start = jiffies;
-
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
- if (rc) {
- netdev_err(bp->dev,
- "Failed to initiate coredump for seg = %d\n",
- seg_record->segment_id);
- goto next_seg;
- }
-
- /* Write segment data into the buffer */
- rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
- &seg_len, buf, buf_len,
- offset + seg_hdr_len);
- if (rc && rc == -ENOBUFS)
- goto err;
- else if (rc)
- netdev_err(bp->dev,
- "Failed to retrieve coredump for seg = %d\n",
- seg_record->segment_id);
-
-next_seg:
- end = jiffies;
- duration = jiffies_to_msecs(end - start);
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
-
- if (buf) {
- /* Write segment header into the buffer */
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len + seg_len;
- }
-
- *dump_len += seg_len;
- seg_record =
- (struct coredump_segment_record *)((u8 *)seg_record +
- seg_record_len);
- }
-
-err:
- if (buf)
- bnxt_fill_coredump_record(bp, buf + offset, start_time,
- start_utc, coredump.total_segs + 1,
- rc);
- kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
- return rc;
-}
-
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_CRASH) {
- netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
- if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
- netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
- return -EOPNOTSUPP;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+ (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+ netdev_info(dev,
+ "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+ netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+ return -EOPNOTSUPP;
+ }
}
bp->dump_flag = dump->flag;
@@ -3971,10 +5288,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_rsvd_8b;
dump->flag = bp->dump_flag;
- if (bp->dump_flag == BNXT_DUMP_CRASH)
- dump->len = BNXT_CRASH_DUMP_LEN;
- else
- bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
return 0;
}
@@ -3989,29 +5303,18 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
dump->flag = bp->dump_flag;
- if (dump->flag == BNXT_DUMP_CRASH) {
-#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, dump->len);
-#endif
- } else {
- return bnxt_get_coredump(bp, buf, &dump->len);
- }
-
- return 0;
+ return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
static int bnxt_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
@@ -4026,9 +5329,32 @@ static int bnxt_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
+static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_output *resp;
+ struct hwrm_pcie_qstats_input *req;
+
+ bp->pcie_stat_len = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+	if (!__bnxt_hwrm_pcie_qstats(bp, req))
+ bp->pcie_stat_len = min_t(u16,
+ le16_to_cpu(resp->pcie_stat_size),
+ sizeof(struct pcie_ctx_hw_stats_v2));
+ hwrm_req_drop(bp, req);
+}
+
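/*
 * Editor's note (illustrative sketch, not part of the patch): clamping
 * the firmware-reported size against the driver's structure, as in the
 * min_t() above, lets an older driver run on newer firmware (extra bytes
 * ignored) and a newer driver on older firmware (short copy).  The sizes
 * below are made up.
 */
#include <stdio.h>

struct pcie_stats_v2 { unsigned long long counters[12]; };	/* 96 bytes */

int main(void)
{
	unsigned int fw_size = 160;	/* firmware reports more counters */
	unsigned int len = fw_size < sizeof(struct pcie_stats_v2) ?
			   fw_size : (unsigned int)sizeof(struct pcie_stats_v2);

	printf("copy %u of %u reported bytes\n", len, fw_size);	/* 96 of 160 */
	return 0;
}
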
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp;
@@ -4037,6 +5363,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
+ bnxt_hwrm_pcie_qstats(bp);
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
@@ -4070,7 +5397,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
test_info->timeout = HWRM_CMD_TIMEOUT;
for (i = 0; i < bp->num_tests; i++) {
char *str = test_info->string[i];
- char *fw_str = resp->test0_name + i * 32;
+ char *fw_str = resp->test_name[i];
if (i == BNXT_MACLPBK_TEST_IDX) {
strcpy(str, "Mac loopback test (offline)");
@@ -4081,14 +5408,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
- strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
- if (test_info->offline_mask & (1 << i))
- strncat(str, " (offline)",
- ETH_GSTRING_LEN - strlen(str));
- else
- strncat(str, " (online)",
- ETH_GSTRING_LEN - strlen(str));
+ snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
+ fw_str, test_info->offline_mask & (1 << i) ?
+ "offline" : "online");
}
}
@@ -4223,6 +5545,33 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp) {
+ ts_stats->pkts = ptp->stats.ts_pkts;
+ ts_stats->lost = ptp->stats.ts_lost;
+ ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+ }
+}
+
+static void bnxt_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ stats->link_down_events =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
+}
+
void bnxt_ethtool_free(struct bnxt *bp)
{
kfree(bp->test_info);
@@ -4230,12 +5579,20 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .cap_link_lanes_supported = 1,
+ .rxfh_per_ctx_key = 1,
+ .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
+ .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+ .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
- ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -4262,19 +5619,30 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_channels = bnxt_set_channels,
.get_rxnfc = bnxt_get_rxnfc,
.set_rxnfc = bnxt_set_rxnfc,
+ .get_rx_ring_count = bnxt_get_rx_ring_count,
.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
+ .create_rxfh_context = bnxt_create_rxfh_context,
+ .modify_rxfh_context = bnxt_modify_rxfh_context,
+ .remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
@@ -4287,4 +5655,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eth_mac_stats = bnxt_get_eth_mac_stats,
.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
.get_rmon_stats = bnxt_get_rmon_stats,
+ .get_ts_stats = bnxt_get_ptp_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 0a57cb6a4a4b..33b86ede1ce5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,49 +22,6 @@ struct bnxt_led_cfg {
u8 rsvd;
};
-#define COREDUMP_LIST_BUF_LEN 2048
-#define COREDUMP_RETRIEVE_BUF_LEN 4096
-
-struct bnxt_coredump {
- void *data;
- int data_size;
- u16 total_segs;
-};
-
-#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
-
-struct bnxt_hwrm_dbg_dma_info {
- void *dest_buf;
- int dest_buf_size;
- u16 dma_len;
- u16 seq_off;
- u16 data_len_off;
- u16 segs;
- u32 seg_start;
- u32 buf_len;
-};
-
-struct hwrm_dbg_cmn_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
-};
-
-struct hwrm_dbg_cmn_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define HWRM_DBG_CMN_FLAGS_MORE 1
-};
-
-#define BNXT_CRASH_DUMP_LEN (8 << 20)
-
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -86,17 +43,35 @@ struct hwrm_dbg_cmn_output {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+#define BNXT_IP_PROTO_WILDCARD 0x0
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type);
+ u32 install_type, struct netlink_ext_ack *extack);
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
deleted file mode 100644
index 94d07a9f7034..000000000000
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ /dev/null
@@ -1,9526 +0,0 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2021 Broadcom Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * DO NOT MODIFY!!! This file is automatically generated.
- */
-
-#ifndef _BNXT_HSI_H_
-#define _BNXT_HSI_H_
-
-/* hwrm_cmd_hdr (size:128b/16B) */
-struct hwrm_cmd_hdr {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_resp_hdr (size:64b/8B) */
-struct hwrm_resp_hdr {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-#define CMD_DISCR_TLV_ENCAP 0x8000UL
-#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
-
-
-#define TLV_TYPE_HWRM_REQUEST 0x1UL
-#define TLV_TYPE_HWRM_RESPONSE 0x2UL
-#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
-#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
-#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
-#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
-#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
-#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
-#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
-
-
-/* tlv (size:64b/8B) */
-struct tlv {
- __le16 cmd_discr;
- u8 reserved_8b;
- u8 flags;
- #define TLV_FLAGS_MORE 0x1UL
- #define TLV_FLAGS_MORE_LAST 0x0UL
- #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
- #define TLV_FLAGS_REQUIRED 0x2UL
- #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
- #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
- #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
- __le16 tlv_type;
- __le16 length;
-};
-
-/* input (size:128b/16B) */
-struct input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* output (size:64b/8B) */
-struct output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
-};
-
-/* hwrm_short_input (size:128b/16B) */
-struct hwrm_short_input {
- __le16 req_type;
- __le16 signature;
- #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
- #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 target_id;
- #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
- #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
- #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
- __le16 size;
- __le64 req_addr;
-};
-
-/* cmd_nums (size:64b/8B) */
-struct cmd_nums {
- __le16 req_type;
- #define HWRM_VER_GET 0x0UL
- #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
- #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
- #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
- #define HWRM_FUNC_BUF_UNRGTR 0xeUL
- #define HWRM_FUNC_VF_CFG 0xfUL
- #define HWRM_RESERVED1 0x10UL
- #define HWRM_FUNC_RESET 0x11UL
- #define HWRM_FUNC_GETFID 0x12UL
- #define HWRM_FUNC_VF_ALLOC 0x13UL
- #define HWRM_FUNC_VF_FREE 0x14UL
- #define HWRM_FUNC_QCAPS 0x15UL
- #define HWRM_FUNC_QCFG 0x16UL
- #define HWRM_FUNC_CFG 0x17UL
- #define HWRM_FUNC_QSTATS 0x18UL
- #define HWRM_FUNC_CLR_STATS 0x19UL
- #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
- #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
- #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
- #define HWRM_FUNC_DRV_RGTR 0x1dUL
- #define HWRM_FUNC_DRV_QVER 0x1eUL
- #define HWRM_FUNC_BUF_RGTR 0x1fUL
- #define HWRM_PORT_PHY_CFG 0x20UL
- #define HWRM_PORT_MAC_CFG 0x21UL
- #define HWRM_PORT_TS_QUERY 0x22UL
- #define HWRM_PORT_QSTATS 0x23UL
- #define HWRM_PORT_LPBK_QSTATS 0x24UL
- #define HWRM_PORT_CLR_STATS 0x25UL
- #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
- #define HWRM_PORT_PHY_QCFG 0x27UL
- #define HWRM_PORT_MAC_QCFG 0x28UL
- #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
- #define HWRM_PORT_PHY_QCAPS 0x2aUL
- #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
- #define HWRM_PORT_PHY_I2C_READ 0x2cUL
- #define HWRM_PORT_LED_CFG 0x2dUL
- #define HWRM_PORT_LED_QCFG 0x2eUL
- #define HWRM_PORT_LED_QCAPS 0x2fUL
- #define HWRM_QUEUE_QPORTCFG 0x30UL
- #define HWRM_QUEUE_QCFG 0x31UL
- #define HWRM_QUEUE_CFG 0x32UL
- #define HWRM_FUNC_VLAN_CFG 0x33UL
- #define HWRM_FUNC_VLAN_QCFG 0x34UL
- #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
- #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
- #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
- #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
- #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
- #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
- #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
- #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
- #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
- #define HWRM_VNIC_ALLOC 0x40UL
- #define HWRM_VNIC_FREE 0x41UL
- #define HWRM_VNIC_CFG 0x42UL
- #define HWRM_VNIC_QCFG 0x43UL
- #define HWRM_VNIC_TPA_CFG 0x44UL
- #define HWRM_VNIC_TPA_QCFG 0x45UL
- #define HWRM_VNIC_RSS_CFG 0x46UL
- #define HWRM_VNIC_RSS_QCFG 0x47UL
- #define HWRM_VNIC_PLCMODES_CFG 0x48UL
- #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
- #define HWRM_VNIC_QCAPS 0x4aUL
- #define HWRM_VNIC_UPDATE 0x4bUL
- #define HWRM_RING_ALLOC 0x50UL
- #define HWRM_RING_FREE 0x51UL
- #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
- #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
- #define HWRM_RING_AGGINT_QCAPS 0x54UL
- #define HWRM_RING_SCHQ_ALLOC 0x55UL
- #define HWRM_RING_SCHQ_CFG 0x56UL
- #define HWRM_RING_SCHQ_FREE 0x57UL
- #define HWRM_RING_RESET 0x5eUL
- #define HWRM_RING_GRP_ALLOC 0x60UL
- #define HWRM_RING_GRP_FREE 0x61UL
- #define HWRM_RING_CFG 0x62UL
- #define HWRM_RING_QCFG 0x63UL
- #define HWRM_RESERVED5 0x64UL
- #define HWRM_RESERVED6 0x65UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
- #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
- #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
- #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
- #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
- #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
- #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
- #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
- #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
- #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
- #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
- #define HWRM_CFA_L2_FILTER_FREE 0x91UL
- #define HWRM_CFA_L2_FILTER_CFG 0x92UL
- #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
- #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
- #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
- #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
- #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
- #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
- #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
- #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
- #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
- #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
- #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
- #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
- #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
- #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
- #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
- #define HWRM_STAT_CTX_ALLOC 0xb0UL
- #define HWRM_STAT_CTX_FREE 0xb1UL
- #define HWRM_STAT_CTX_QUERY 0xb2UL
- #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
- #define HWRM_PORT_QSTATS_EXT 0xb4UL
- #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
- #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
- #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
- #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
- #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_RESERVED7 0xbaUL
- #define HWRM_PORT_TX_FIR_CFG 0xbbUL
- #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
- #define HWRM_PORT_ECN_QSTATS 0xbdUL
- #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
- #define HWRM_FW_LIVEPATCH 0xbfUL
- #define HWRM_FW_RESET 0xc0UL
- #define HWRM_FW_QSTATUS 0xc1UL
- #define HWRM_FW_HEALTH_CHECK 0xc2UL
- #define HWRM_FW_SYNC 0xc3UL
- #define HWRM_FW_STATE_QCAPS 0xc4UL
- #define HWRM_FW_STATE_QUIESCE 0xc5UL
- #define HWRM_FW_STATE_BACKUP 0xc6UL
- #define HWRM_FW_STATE_RESTORE 0xc7UL
- #define HWRM_FW_SET_TIME 0xc8UL
- #define HWRM_FW_GET_TIME 0xc9UL
- #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
- #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
- #define HWRM_FW_IPC_MAILBOX 0xccUL
- #define HWRM_FW_ECN_CFG 0xcdUL
- #define HWRM_FW_ECN_QCFG 0xceUL
- #define HWRM_FW_SECURE_CFG 0xcfUL
- #define HWRM_EXEC_FWD_RESP 0xd0UL
- #define HWRM_REJECT_FWD_RESP 0xd1UL
- #define HWRM_FWD_RESP 0xd2UL
- #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
- #define HWRM_OEM_CMD 0xd4UL
- #define HWRM_PORT_PRBS_TEST 0xd5UL
- #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
- #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
- #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
- #define HWRM_PORT_DSC_DUMP 0xd9UL
- #define HWRM_PORT_EP_TX_QCFG 0xdaUL
- #define HWRM_PORT_EP_TX_CFG 0xdbUL
- #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
- #define HWRM_REG_POWER_QUERY 0xe1UL
- #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
- #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
- #define HWRM_WOL_FILTER_ALLOC 0xf0UL
- #define HWRM_WOL_FILTER_FREE 0xf1UL
- #define HWRM_WOL_FILTER_QCFG 0xf2UL
- #define HWRM_WOL_REASON_QCFG 0xf3UL
- #define HWRM_CFA_METER_QCAPS 0xf4UL
- #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
- #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
- #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
- #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
- #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
- #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
- #define HWRM_CFA_VFR_ALLOC 0xfdUL
- #define HWRM_CFA_VFR_FREE 0xfeUL
- #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
- #define HWRM_CFA_VF_PAIR_FREE 0x101UL
- #define HWRM_CFA_VF_PAIR_INFO 0x102UL
- #define HWRM_CFA_FLOW_ALLOC 0x103UL
- #define HWRM_CFA_FLOW_FREE 0x104UL
- #define HWRM_CFA_FLOW_FLUSH 0x105UL
- #define HWRM_CFA_FLOW_STATS 0x106UL
- #define HWRM_CFA_FLOW_INFO 0x107UL
- #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
- #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
- #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
- #define HWRM_CFA_PAIR_ALLOC 0x10dUL
- #define HWRM_CFA_PAIR_FREE 0x10eUL
- #define HWRM_CFA_PAIR_INFO 0x10fUL
- #define HWRM_FW_IPC_MSG 0x110UL
- #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
- #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
- #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
- #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
- #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
- #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
- #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
- #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
- #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
- #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
- #define HWRM_CFA_COUNTER_CFG 0x11cUL
- #define HWRM_CFA_COUNTER_QCFG 0x11dUL
- #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
- #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
- #define HWRM_CFA_EEM_QCAPS 0x120UL
- #define HWRM_CFA_EEM_CFG 0x121UL
- #define HWRM_CFA_EEM_QCFG 0x122UL
- #define HWRM_CFA_EEM_OP 0x123UL
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
- #define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
- #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
- #define HWRM_ENGINE_CKV_STATUS 0x12eUL
- #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
- #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
- #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
- #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
- #define HWRM_ENGINE_CKV_FLUSH 0x133UL
- #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
- #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
- #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
- #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
- #define HWRM_ENGINE_QG_QUERY 0x13dUL
- #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
- #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
- #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
- #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
- #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
- #define HWRM_ENGINE_QG_METER_BIND 0x143UL
- #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
- #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
- #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
- #define HWRM_ENGINE_SG_QUERY 0x147UL
- #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
- #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
- #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
- #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
- #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
- #define HWRM_ENGINE_STATS_CONFIG 0x155UL
- #define HWRM_ENGINE_STATS_CLEAR 0x156UL
- #define HWRM_ENGINE_STATS_QUERY 0x157UL
- #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
- #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
- #define HWRM_ENGINE_RQ_FREE 0x15fUL
- #define HWRM_ENGINE_CQ_ALLOC 0x160UL
- #define HWRM_ENGINE_CQ_FREE 0x161UL
- #define HWRM_ENGINE_NQ_ALLOC 0x162UL
- #define HWRM_ENGINE_NQ_FREE 0x163UL
- #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
- #define HWRM_ENGINE_FUNC_QCFG 0x165UL
- #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
- #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
- #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
- #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
- #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
- #define HWRM_FUNC_VF_BW_CFG 0x195UL
- #define HWRM_FUNC_VF_BW_QCFG 0x196UL
- #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
- #define HWRM_FUNC_QSTATS_EXT 0x198UL
- #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
- #define HWRM_FUNC_SPD_CFG 0x19aUL
- #define HWRM_FUNC_SPD_QCFG 0x19bUL
- #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
- #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
- #define HWRM_FUNC_PTP_CFG 0x19eUL
- #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
- #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
- #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
- #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
- #define HWRM_SELFTEST_QLIST 0x200UL
- #define HWRM_SELFTEST_EXEC 0x201UL
- #define HWRM_SELFTEST_IRQ 0x202UL
- #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
- #define HWRM_PCIE_QSTATS 0x204UL
- #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
- #define HWRM_MFG_TIMERS_QUERY 0x206UL
- #define HWRM_MFG_OTP_CFG 0x207UL
- #define HWRM_MFG_OTP_QCFG 0x208UL
- #define HWRM_MFG_HDMA_TEST 0x209UL
- #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
- #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
- #define HWRM_MFG_SOC_IMAGE 0x20cUL
- #define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
- #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
- #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
- #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
- #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
- #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
- #define HWRM_TF 0x2bcUL
- #define HWRM_TF_VERSION_GET 0x2bdUL
- #define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_ATTACH 0x2c7UL
- #define HWRM_TF_SESSION_REGISTER 0x2c8UL
- #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
- #define HWRM_TF_SESSION_CLOSE 0x2caUL
- #define HWRM_TF_SESSION_QCFG 0x2cbUL
- #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
- #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
- #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
- #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
- #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
- #define HWRM_TF_TBL_TYPE_GET 0x2daUL
- #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
- #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
- #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
- #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
- #define HWRM_TF_EXT_EM_OP 0x2e7UL
- #define HWRM_TF_EXT_EM_CFG 0x2e8UL
- #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
- #define HWRM_TF_EM_INSERT 0x2eaUL
- #define HWRM_TF_EM_DELETE 0x2ebUL
- #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
- #define HWRM_TF_EM_MOVE 0x2edUL
- #define HWRM_TF_TCAM_SET 0x2f8UL
- #define HWRM_TF_TCAM_GET 0x2f9UL
- #define HWRM_TF_TCAM_MOVE 0x2faUL
- #define HWRM_TF_TCAM_FREE 0x2fbUL
- #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
- #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
- #define HWRM_TF_IF_TBL_SET 0x2feUL
- #define HWRM_TF_IF_TBL_GET 0x2ffUL
- #define HWRM_SV 0x400UL
- #define HWRM_DBG_READ_DIRECT 0xff10UL
- #define HWRM_DBG_READ_INDIRECT 0xff11UL
- #define HWRM_DBG_WRITE_DIRECT 0xff12UL
- #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
- #define HWRM_DBG_DUMP 0xff14UL
- #define HWRM_DBG_ERASE_NVM 0xff15UL
- #define HWRM_DBG_CFG 0xff16UL
- #define HWRM_DBG_COREDUMP_LIST 0xff17UL
- #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
- #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
- #define HWRM_DBG_FW_CLI 0xff1aUL
- #define HWRM_DBG_I2C_CMD 0xff1bUL
- #define HWRM_DBG_RING_INFO_GET 0xff1cUL
- #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
- #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
- #define HWRM_DBG_DRV_TRACE 0xff1fUL
- #define HWRM_DBG_QCAPS 0xff20UL
- #define HWRM_DBG_QCFG 0xff21UL
- #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
- #define HWRM_DBG_USEQ_ALLOC 0xff23UL
- #define HWRM_DBG_USEQ_FREE 0xff24UL
- #define HWRM_DBG_USEQ_FLUSH 0xff25UL
- #define HWRM_DBG_USEQ_QCAPS 0xff26UL
- #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
- #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
- #define HWRM_DBG_USEQ_RUN 0xff29UL
- #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
- #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
- #define HWRM_NVM_DEFRAG 0xffecUL
- #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
- #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
- #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
- #define HWRM_NVM_FLUSH 0xfff0UL
- #define HWRM_NVM_GET_VARIABLE 0xfff1UL
- #define HWRM_NVM_SET_VARIABLE 0xfff2UL
- #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
- #define HWRM_NVM_MODIFY 0xfff4UL
- #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
- #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
- #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
- #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
- #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
- #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
- #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
- #define HWRM_NVM_RAW_DUMP 0xfffcUL
- #define HWRM_NVM_READ 0xfffdUL
- #define HWRM_NVM_WRITE 0xfffeUL
- #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
- #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
- __le16 unused_0[3];
-};
-
-/* ret_codes (size:64b/8B) */
-struct ret_codes {
- __le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
- #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
- #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_BUSY 0x10UL
- #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
- #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
- __le16 unused_0[3];
-};
-
-/* hwrm_err_output (size:128b/16B) */
-struct hwrm_err_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 opaque_0;
- __le16 opaque_1;
- u8 cmd_err;
- u8 valid;
-};
-#define HWRM_NA_SIGNATURE ((__le32)(-1))
-#define HWRM_MAX_REQ_LEN 128
-#define HWRM_MAX_RESP_LEN 704
-#define HW_HASH_INDEX_SIZE 0x80
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1
-#define HWRM_TARGET_ID_BONO 0xFFF8
-#define HWRM_TARGET_ID_KONG 0xFFF9
-#define HWRM_TARGET_ID_APE 0xFFFA
-#define HWRM_TARGET_ID_TOOLS 0xFFFD
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 52
-#define HWRM_VERSION_STR "1.10.2.52"
-
-/* hwrm_ver_get_input (size:192b/24B) */
-struct hwrm_ver_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 hwrm_intf_maj;
- u8 hwrm_intf_min;
- u8 hwrm_intf_upd;
- u8 unused_0[5];
-};
-
-/* hwrm_ver_get_output (size:1408b/176B) */
-struct hwrm_ver_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hwrm_intf_maj_8b;
- u8 hwrm_intf_min_8b;
- u8 hwrm_intf_upd_8b;
- u8 hwrm_intf_rsvd_8b;
- u8 hwrm_fw_maj_8b;
- u8 hwrm_fw_min_8b;
- u8 hwrm_fw_bld_8b;
- u8 hwrm_fw_rsvd_8b;
- u8 mgmt_fw_maj_8b;
- u8 mgmt_fw_min_8b;
- u8 mgmt_fw_bld_8b;
- u8 mgmt_fw_rsvd_8b;
- u8 netctrl_fw_maj_8b;
- u8 netctrl_fw_min_8b;
- u8 netctrl_fw_bld_8b;
- u8 netctrl_fw_rsvd_8b;
- __le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
- #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
- #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
- #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
- #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
- #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
- u8 roce_fw_maj_8b;
- u8 roce_fw_min_8b;
- u8 roce_fw_bld_8b;
- u8 roce_fw_rsvd_8b;
- char hwrm_fw_name[16];
- char mgmt_fw_name[16];
- char netctrl_fw_name[16];
- char active_pkg_name[16];
- char roce_fw_name[16];
- __le16 chip_num;
- u8 chip_rev;
- u8 chip_metal;
- u8 chip_bond_id;
- u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
- __le16 max_req_win_len;
- __le16 max_resp_len;
- __le16 def_req_timeout;
- u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
- u8 unused_0[2];
- u8 always_1;
- __le16 hwrm_intf_major;
- __le16 hwrm_intf_minor;
- __le16 hwrm_intf_build;
- __le16 hwrm_intf_patch;
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 netctrl_fw_major;
- __le16 netctrl_fw_minor;
- __le16 netctrl_fw_build;
- __le16 netctrl_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- __le16 max_ext_req_len;
- __le16 max_req_timeout;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* eject_cmpl (size:128b/16B) */
-struct eject_cmpl {
- __le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
- #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
- #define EJECT_CMPL_FLAGS_SFT 6
- #define EJECT_CMPL_FLAGS_ERROR 0x40UL
- __le16 len;
- __le32 opaque;
- __le16 v;
- #define EJECT_CMPL_V 0x1UL
- #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
- #define EJECT_CMPL_ERRORS_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
- #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
- __le16 reserved16;
- __le32 unused_2;
-};
-
-/* hwrm_cmpl (size:128b/16B) */
-struct hwrm_cmpl {
- __le16 type;
- #define CMPL_TYPE_MASK 0x3fUL
- #define CMPL_TYPE_SFT 0
- #define CMPL_TYPE_HWRM_DONE 0x20UL
- #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
- __le16 sequence_id;
- __le32 unused_1;
- __le32 v;
- #define CMPL_V 0x1UL
- __le32 unused_3;
-};
-
-/* hwrm_fwd_req_cmpl (size:128b/16B) */
-struct hwrm_fwd_req_cmpl {
- __le16 req_len_type;
- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
- #define FWD_REQ_CMPL_TYPE_SFT 0
- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
- #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
- __le16 source_id;
- __le32 unused0;
- __le32 req_buf_addr_v[2];
- #define FWD_REQ_CMPL_V 0x1UL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
-};
-
-/* hwrm_fwd_resp_cmpl (size:128b/16B) */
-struct hwrm_fwd_resp_cmpl {
- __le16 type;
- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
- #define FWD_RESP_CMPL_TYPE_SFT 0
- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
- #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
- __le16 source_id;
- __le16 resp_len;
- __le16 unused_1;
- __le32 resp_buf_addr_v[2];
- #define FWD_RESP_CMPL_V 0x1UL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
-};
-
-/* hwrm_async_event_cmpl (size:128b/16B) */
-struct hwrm_async_event_cmpl {
- __le16 type;
- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_V 0x1UL
- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_status_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
-};
-
-/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
-struct hwrm_async_event_cmpl_port_conn_not_allowed {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
-};
-
-/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_link_speed_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
-};
-
-/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
-struct hwrm_async_event_cmpl_reset_notify {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
-};
-
-/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_recovery {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
-};
-
-/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
-struct hwrm_async_event_cmpl_ring_monitor_msg {
- __le16 type;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_vf_cfg_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
-};
-
-/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
-struct hwrm_async_event_cmpl_default_vnic_change {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
- #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
-};
-
-/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
-struct hwrm_async_event_cmpl_hw_flow_aged {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
- #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_req {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
-struct hwrm_async_event_cmpl_eem_cache_flush_done {
- __le16 type;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
-};
-
-/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
-struct hwrm_async_event_cmpl_deferred_response {
- __le16 type;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
-struct hwrm_async_event_cmpl_echo_request {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
-};
-
-/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_master {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
-};
-
-/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
-struct hwrm_async_event_cmpl_pps_timestamp {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
-};
-
-/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
-};
-
-/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
-struct hwrm_async_event_cmpl_hwrm_error {
- __le16 type;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
-};
-
-/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_base {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
-};
-
-/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_pause_storm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
-};
-
-/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_invalid_signal {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
-};
-
-/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
-struct hwrm_async_event_cmpl_error_report_nvm {
- __le16 type;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
- __le32 event_data2;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
-};
-
-/* hwrm_func_reset_input (size:192b/24B) */
-struct hwrm_func_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
- __le16 vf_id;
- u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
- u8 unused_0;
-};
-
-/* hwrm_func_reset_output (size:128b/16B) */
-struct hwrm_func_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
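Every HWRM request in this file opens with the same header fields (req_type, cmpl_ring, seq_id, target_id, resp_addr), and an enables word gates which optional fields the firmware should honor. A minimal sketch of filling hwrm_func_reset_input for a VF reset; the HWRM_FUNC_RESET opcode and the cpu_to_le16()/cpu_to_le32() helpers are assumed to come from outside this hunk:

/* Sketch only, not driver code: resets a single child VF.
 * Assumes the struct above plus HWRM_FUNC_RESET and the
 * cpu_to_le16()/cpu_to_le32() helpers from elsewhere in the tree.
 */
static void build_vf_reset(struct hwrm_func_reset_input *req, uint16_t vf_id)
{
	req->req_type = cpu_to_le16(HWRM_FUNC_RESET);
	req->enables = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID); /* vf_id is meaningful */
	req->vf_id = cpu_to_le16(vf_id);
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;
}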
-/* hwrm_func_getfid_input (size:192b/24B) */
-struct hwrm_func_getfid_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
- __le16 pci_id;
- u8 unused_0[2];
-};
-
-/* hwrm_func_getfid_output (size:128b/16B) */
-struct hwrm_func_getfid_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_alloc_input (size:192b/24B) */
-struct hwrm_func_vf_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_alloc_output (size:128b/16B) */
-struct hwrm_func_vf_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 first_vf_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_free_input (size:192b/24B) */
-struct hwrm_func_vf_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
- __le16 first_vf_id;
- __le16 num_vfs;
-};
-
-/* hwrm_func_vf_free_output (size:128b/16B) */
-struct hwrm_func_vf_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_cfg_input (size:448b/56B) */
-struct hwrm_func_vf_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
- __le16 mtu;
- __le16 guest_vlan;
- __le16 async_event_cr;
- u8 dflt_mac_addr[6];
- __le32 flags;
- #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
- #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
- #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
- #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
- #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
- #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
- #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
- #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
- #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
-};
-
-/* hwrm_func_vf_cfg_output (size:128b/16B) */
-struct hwrm_func_vf_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qcaps_input (size:192b/24B) */
-struct hwrm_func_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcaps_output (size:768b/96B) */
-struct hwrm_func_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le32 flags;
- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
- #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
- #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
- #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
- #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
- #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
- #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
- u8 mac_address[6];
- __le16 max_rsscos_ctx;
- __le16 max_cmpl_rings;
- __le16 max_tx_rings;
- __le16 max_rx_rings;
- __le16 max_l2_ctxs;
- __le16 max_vnics;
- __le16 first_vf_id;
- __le16 max_vfs;
- __le16 max_stat_ctx;
- __le32 max_encap_records;
- __le32 max_decap_records;
- __le32 max_tx_em_flows;
- __le32 max_tx_wm_flows;
- __le32 max_rx_em_flows;
- __le32 max_rx_wm_flows;
- __le32 max_mcast_filters;
- __le32 max_flow_id;
- __le32 max_hw_ring_grps;
- __le16 max_sp_tx_rings;
- __le16 max_msix_vfs;
- __le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- u8 max_schqs;
- u8 mpc_chnls_cap;
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
- #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
- __le16 max_key_ctxs_alloc;
- u8 unused_1[7];
- u8 valid;
-};
-
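Response words are little-endian on the wire, so capability tests convert once and mask against the RESP_FLAGS constants. A minimal sketch, assuming le32_to_cpu() (linux/byteorder) and a response the firmware has already marked valid:

/* Sketch: probe one capability bit from a completed qcaps response. */
static bool func_supports_ptp(const struct hwrm_func_qcaps_output *resp)
{
	uint32_t flags = le32_to_cpu(resp->flags);

	return (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) != 0;
}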
-/* hwrm_func_qcfg_input (size:192b/24B) */
-struct hwrm_func_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_qcfg_output (size:896b/112B) */
-struct hwrm_func_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- __le16 port_id;
- __le16 vlan;
- __le16 flags;
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
- #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
- #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
- #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
- #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
- #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
- #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
- #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
- #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
- u8 mac_address[6];
- __le16 pci_id;
- __le16 alloc_rsscos_ctx;
- __le16 alloc_cmpl_rings;
- __le16 alloc_tx_rings;
- __le16 alloc_rx_rings;
- __le16 alloc_l2_ctx;
- __le16 alloc_vnics;
- __le16 admin_mtu;
- __le16 mru;
- __le16 stat_ctx_id;
- u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
- u8 port_pf_cnt;
- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
- #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
- __le16 dflt_vnic_id;
- __le16 max_mtu_configured;
- __le32 min_bw;
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
- u8 options;
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
- __le16 alloc_vfs;
- __le32 alloc_mcast_filters;
- __le32 alloc_hw_ring_grps;
- __le16 alloc_sp_tx_rings;
- __le16 alloc_stat_ctx;
- __le16 alloc_msix;
- __le16 registered_vfs;
- __le16 l2_doorbell_bar_size_kb;
- u8 unused_1;
- u8 always_1;
- __le32 reset_addr_poll;
- __le16 legacy_l2_db_size_kb;
- __le16 svif_info;
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
- #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 mpc_chnls;
- #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
- #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 unused_2[3];
- __le32 partition_min_bw;
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __le16 host_mtu;
- __le16 alloc_tx_key_ctxs;
- __le16 alloc_rx_key_ctxs;
- u8 unused_3[5];
- u8 valid;
-};
-
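The min_bw/max_bw words pack a 28-bit value in bits 0-27, a scale bit (bits vs. bytes) at bit 28, and a 3-bit unit code at bits 29-31; the same layout recurs in func_cfg and in the partition_*_bw fields. A standalone decode of that packing (the exact multiplier behind each unit code is firmware-defined and not spelled out in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the FUNC_QCFG_RESP_MIN_BW_* defines above. */
#define BW_VALUE_MASK  0x0fffffffu
#define BW_SCALE_BYTES 0x10000000u  /* bit 28: 0 = bits, 1 = bytes */
#define BW_UNIT_MASK   0xe0000000u
#define BW_UNIT_SFT    29

int main(void)
{
	/* Illustrative word: value 100, byte scale, unit code 0x6 (GIGA). */
	uint32_t bw = 100u | BW_SCALE_BYTES | (0x6u << BW_UNIT_SFT);

	printf("value=%u unit_code=0x%x scale=%s\n",
	       bw & BW_VALUE_MASK,
	       (bw & BW_UNIT_MASK) >> BW_UNIT_SFT,
	       (bw & BW_SCALE_BYTES) ? "bytes" : "bits");
	return 0;
}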
-/* hwrm_func_cfg_input (size:896b/112B) */
-struct hwrm_func_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 num_msix;
- __le32 flags;
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
- #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
- #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
- #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
- #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
- #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
- #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
- #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
- #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
- #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
- #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
- #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
- #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
- #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
- #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- __le32 enables;
- #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
- #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
- #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
- #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
- #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
- #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
- #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
- #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
- #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
- #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
- __le16 admin_mtu;
- __le16 mru;
- __le16 num_rsscos_ctxs;
- __le16 num_cmpl_rings;
- __le16 num_tx_rings;
- __le16 num_rx_rings;
- __le16 num_l2_ctxs;
- __le16 num_vnics;
- __le16 num_stat_ctxs;
- __le16 num_hw_ring_grps;
- u8 dflt_mac_addr[6];
- __le16 dflt_vlan;
- __be32 dflt_ip_addr[4];
- __le32 min_bw;
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 max_bw;
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- __le16 async_event_cr;
- u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
- u8 allowed_vlan_pris;
- u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
- #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
- u8 options;
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
- #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
- #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
- #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
- #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
- __le16 num_mcast_filters;
- __le16 schq_id;
- __le16 mpc_chnls;
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
- #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
- #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
- #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
- __le32 partition_min_bw;
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
- __le32 partition_max_bw;
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
- __be16 tpid;
- __le16 host_mtu;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
- u8 unused_0[4];
-};
-
-/* hwrm_func_cfg_output (size:128b/16B) */
-struct hwrm_func_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qstats_input (size:192b/24B) */
-struct hwrm_func_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[5];
-};
-
-/* hwrm_func_qstats_output (size:1408b/176B) */
-struct hwrm_func_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_drop_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_drop_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_qstats_ext_input (size:256b/32B) */
-struct hwrm_func_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[1];
- __le32 enables;
- #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
- __le16 schq_id;
- __le16 traffic_class;
- u8 unused_1[4];
-};
-
-/* hwrm_func_qstats_ext_output (size:1536b/192B) */
-struct hwrm_func_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_clr_stats_input (size:192b/24B) */
-struct hwrm_func_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_clr_stats_output (size:128b/16B) */
-struct hwrm_func_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_vf_resc_free_input (size:192b/24B) */
-struct hwrm_func_vf_resc_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- u8 unused_0[6];
-};
-
-/* hwrm_func_vf_resc_free_output (size:128b/16B) */
-struct hwrm_func_vf_resc_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_drv_rgtr_input (size:896b/112B) */
-struct hwrm_func_drv_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
- __le32 enables;
- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
- __le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le32 timestamp;
- u8 unused_1[4];
- __le32 vf_req_fwd[8];
- __le32 async_event_fwd[8];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
-};
-
-/* hwrm_func_drv_rgtr_output (size:128b/16B) */
-struct hwrm_func_drv_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
- u8 unused_0[3];
- u8 valid;
-};
-
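vf_req_fwd[8] and async_event_fwd[8] are 256-bit bitmaps stored as eight little-endian 32-bit words, indexed by request or event id. A minimal sketch of the word/bit arithmetic, assuming ids below 256 and a cpu_to_le32() pass over each word before the request is sent (this mirrors, but is not copied from, how drivers typically fill the array):

/* Sketch: mark async event id `ev` for forwarding in a host-order copy
 * of the bitmap; convert each of the 8 words with cpu_to_le32() after.
 */
static void fwd_async_event(uint32_t bmap[8], unsigned int ev)
{
	bmap[ev / 32] |= 1u << (ev % 32);  /* word index, then bit in word */
}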
-/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
-struct hwrm_func_drv_unrgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
-struct hwrm_func_drv_unrgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
-struct hwrm_func_buf_rgtr_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
- __le16 vf_id;
- __le16 req_buf_num_pages;
- __le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
- __le16 req_buf_len;
- __le16 resp_buf_len;
- u8 unused_0[2];
- __le64 req_buf_page_addr0;
- __le64 req_buf_page_addr1;
- __le64 req_buf_page_addr2;
- __le64 req_buf_page_addr3;
- __le64 req_buf_page_addr4;
- __le64 req_buf_page_addr5;
- __le64 req_buf_page_addr6;
- __le64 req_buf_page_addr7;
- __le64 req_buf_page_addr8;
- __le64 req_buf_page_addr9;
- __le64 error_buf_addr;
- __le64 resp_buf_addr;
-};
-
-/* hwrm_func_buf_rgtr_output (size:128b/16B) */
-struct hwrm_func_buf_rgtr_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
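The req_buf_page_size codes are simply log2 of the page size in bytes: 0x4 = 16 B, 0xc = 4 KiB, 0xd = 8 KiB, 0x10 = 64 KiB, 0x15 = 2 MiB, 0x16 = 4 MiB, 0x1e = 1 GiB. A one-line check of that relationship:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Codes copied from FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_* above. */
	const uint8_t codes[] = { 0x4, 0xc, 0xd, 0x10, 0x15, 0x16, 0x1e };

	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("code 0x%02x -> %llu bytes\n", codes[i],
		       1ull << codes[i]);  /* page size = 2^code */
	return 0;
}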
-/* hwrm_func_drv_qver_input (size:192b/24B) */
-struct hwrm_func_drv_qver_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 reserved;
- __le16 fid;
- u8 unused_0[2];
-};
-
-/* hwrm_func_drv_qver_output (size:256b/32B) */
-struct hwrm_func_drv_qver_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
- u8 ver_maj_8b;
- u8 ver_min_8b;
- u8 ver_upd_8b;
- u8 unused_0[3];
- __le16 ver_maj;
- __le16 ver_min;
- __le16 ver_upd;
- __le16 ver_patch;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_func_resource_qcaps_input (size:192b/24B) */
-struct hwrm_func_resource_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_func_resource_qcaps_output (size:512b/64B) */
-struct hwrm_func_resource_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 max_vfs;
- __le16 max_msix;
- __le16 vf_reservation_strategy;
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 max_tx_scheduler_inputs;
- __le16 flags;
- #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
-struct hwrm_func_vf_resource_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 max_msix;
- __le16 min_rsscos_ctx;
- __le16 max_rsscos_ctx;
- __le16 min_cmpl_rings;
- __le16 max_cmpl_rings;
- __le16 min_tx_rings;
- __le16 max_tx_rings;
- __le16 min_rx_rings;
- __le16 max_rx_rings;
- __le16 min_l2_ctxs;
- __le16 max_l2_ctxs;
- __le16 min_vnics;
- __le16 max_vnics;
- __le16 min_stat_ctx;
- __le16 max_stat_ctx;
- __le16 min_hw_ring_grps;
- __le16 max_hw_ring_grps;
- __le16 flags;
- #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[2];
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
-struct hwrm_func_vf_resource_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reserved_rsscos_ctx;
- __le16 reserved_cmpl_rings;
- __le16 reserved_tx_rings;
- __le16 reserved_rx_rings;
- __le16 reserved_l2_ctxs;
- __le16 reserved_vnics;
- __le16 reserved_stat_ctx;
- __le16 reserved_hw_ring_grps;
- __le16 reserved_tx_key_ctxs;
- __le16 reserved_rx_key_ctxs;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
-struct hwrm_func_backing_store_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
-struct hwrm_func_backing_store_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 qp_max_entries;
- __le16 qp_min_qp1_entries;
- __le16 qp_max_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_max_l2_entries;
- __le32 srq_max_entries;
- __le16 srq_entry_size;
- __le16 cq_max_l2_entries;
- __le32 cq_max_entries;
- __le16 cq_entry_size;
- __le16 vnic_max_vnic_entries;
- __le16 vnic_max_ring_table_entries;
- __le16 vnic_entry_size;
- __le32 stat_max_entries;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le32 tqm_min_entries_per_ring;
- __le32 tqm_max_entries_per_ring;
- __le32 mrav_max_entries;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- __le32 tim_max_entries;
- __le16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
- __le16 ctx_init_mask;
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
- #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
- u8 qp_init_offset;
- u8 srq_init_offset;
- u8 cq_init_offset;
- u8 vnic_init_offset;
- u8 tqm_fp_rings_count;
- u8 stat_init_offset;
- u8 mrav_init_offset;
- u8 tqm_fp_rings_count_ext;
- u8 tkc_init_offset;
- u8 rkc_init_offset;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- __le32 tkc_max_entries;
- __le32 rkc_max_entries;
- u8 rsvd[7];
- u8 valid;
-};
-
-/* tqm_fp_ring_cfg (size:128b/16B) */
-struct tqm_fp_ring_cfg {
- u8 tqm_ring_pg_size_tqm_ring_lvl;
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
- u8 unused[3];
- __le32 tqm_ring_num_entries;
- __le64 tqm_ring_page_dir;
-};
-
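Each *_pg_size_*_lvl byte here, and in the backing-store config that follows, packs two nibbles: page-table depth (level) in bits 0-3 and the page-size code in bits 4-7. The predefined constants already carry their shifts, so one OR composes the field. A minimal sketch using the TQM ring constants above:

/* Sketch: a 2-level page table of 2 MiB pages for one TQM ring. */
uint8_t cfg = TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 |
	      TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M;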
-/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
-struct hwrm_func_backing_store_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
- __le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
- u8 qpc_pg_size_qpc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
- u8 srq_pg_size_srq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
- u8 cq_pg_size_cq_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
- u8 vnic_pg_size_vnic_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
- u8 stat_pg_size_stat_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
- u8 tqm_sp_pg_size_tqm_sp_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
- u8 tqm_ring0_pg_size_tqm_ring0_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
- u8 tqm_ring1_pg_size_tqm_ring1_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
- u8 tqm_ring2_pg_size_tqm_ring2_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
- u8 tqm_ring3_pg_size_tqm_ring3_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
- u8 tqm_ring4_pg_size_tqm_ring4_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
- u8 tqm_ring5_pg_size_tqm_ring5_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
- u8 tqm_ring6_pg_size_tqm_ring6_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
- u8 tqm_ring7_pg_size_tqm_ring7_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
- u8 mrav_pg_size_mrav_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
- u8 tim_pg_size_tim_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
- __le64 qpc_page_dir;
- __le64 srq_page_dir;
- __le64 cq_page_dir;
- __le64 vnic_page_dir;
- __le64 stat_page_dir;
- __le64 tqm_sp_page_dir;
- __le64 tqm_ring0_page_dir;
- __le64 tqm_ring1_page_dir;
- __le64 tqm_ring2_page_dir;
- __le64 tqm_ring3_page_dir;
- __le64 tqm_ring4_page_dir;
- __le64 tqm_ring5_page_dir;
- __le64 tqm_ring6_page_dir;
- __le64 tqm_ring7_page_dir;
- __le64 mrav_page_dir;
- __le64 tim_page_dir;
- __le32 qp_num_entries;
- __le32 srq_num_entries;
- __le32 cq_num_entries;
- __le32 stat_num_entries;
- __le32 tqm_sp_num_entries;
- __le32 tqm_ring0_num_entries;
- __le32 tqm_ring1_num_entries;
- __le32 tqm_ring2_num_entries;
- __le32 tqm_ring3_num_entries;
- __le32 tqm_ring4_num_entries;
- __le32 tqm_ring5_num_entries;
- __le32 tqm_ring6_num_entries;
- __le32 tqm_ring7_num_entries;
- __le32 mrav_num_entries;
- __le32 tim_num_entries;
- __le16 qp_num_qp1_entries;
- __le16 qp_num_l2_entries;
- __le16 qp_entry_size;
- __le16 srq_num_l2_entries;
- __le16 srq_entry_size;
- __le16 cq_num_l2_entries;
- __le16 cq_entry_size;
- __le16 vnic_num_vnic_entries;
- __le16 vnic_num_ring_table_entries;
- __le16 vnic_entry_size;
- __le16 stat_entry_size;
- __le16 tqm_entry_size;
- __le16 mrav_entry_size;
- __le16 tim_entry_size;
- u8 tqm_ring8_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
- u8 ring8_unused[3];
- __le32 tqm_ring8_num_entries;
- __le64 tqm_ring8_page_dir;
- u8 tqm_ring9_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
- u8 ring9_unused[3];
- __le32 tqm_ring9_num_entries;
- __le64 tqm_ring9_page_dir;
- u8 tqm_ring10_pg_size_tqm_ring_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
- u8 ring10_unused[3];
- __le32 tqm_ring10_num_entries;
- __le64 tqm_ring10_page_dir;
- __le32 tkc_num_entries;
- __le32 rkc_num_entries;
- __le64 tkc_page_dir;
- __le64 rkc_page_dir;
- __le16 tkc_entry_size;
- __le16 rkc_entry_size;
- u8 tkc_pg_size_tkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
- u8 rkc_pg_size_rkc_lvl;
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
- #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- u8 rsvd[2];
-};
-
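Each `*_pg_size_*_lvl` byte in the request above follows one repeated encoding: the page-table depth sits in the low nibble (`*_LVL_MASK`, shift 0) and the backing-store page size in the high nibble (`*_PG_SIZE_MASK`, shift 4). A minimal usage sketch, assuming `req` points at the `hwrm_func_backing_store_cfg_input` request; this is illustrative, not code from the driver:

	/* the PG_SIZE_PG_* values are already shifted by the 4-bit SFT,
	 * so a single OR packs both fields of the byte
	 */
	req->tqm_ring3_pg_size_tqm_ring3_lvl =
		FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K |
		FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1;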
-/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
-struct hwrm_func_backing_store_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
-struct hwrm_error_recovery_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
-struct hwrm_error_recovery_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
- __le32 driver_polling_freq;
- __le32 master_func_wait_period;
- __le32 normal_func_wait_period;
- __le32 master_func_wait_period_after_reset;
- __le32 max_bailout_time_after_reset;
- __le32 fw_health_status_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
- __le32 fw_heartbeat_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
- __le32 fw_reset_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg;
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
- __le32 reset_inprogress_reg_mask;
- u8 unused_0[3];
- u8 reg_array_cnt;
- __le32 reset_reg[16];
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
- __le32 reset_reg_val[16];
- u8 delay_after_reset[16];
- __le32 err_recovery_cnt_reg;
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
- #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
- u8 unused_1[3];
- u8 valid;
-};
-
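Every `*_reg` word in the error-recovery response encodes the address space in bits [1:0] and a dword-aligned register offset in bits [31:2], per the `ADDR_SPACE_MASK`/`ADDR_MASK` defines above. A decode sketch under that reading; the helper name is assumed, not taken from the driver:

	static void bnxt_decode_recovery_reg(u32 reg, u8 *space, u32 *offset)
	{
		/* low two bits: PCIE_CFG/GRC/BAR0/BAR1 address space */
		*space  = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
		/* remaining bits: byte offset, already dword aligned */
		*offset = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
	}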
-/* hwrm_func_echo_response_input (size:192b/24B) */
-struct hwrm_func_echo_response_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 event_data1;
- __le32 event_data2;
-};
-
-/* hwrm_func_echo_response_output (size:128b/16B) */
-struct hwrm_func_echo_response_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
-struct hwrm_func_ptp_pin_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 unused_0[8];
-};
-
-/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_pins;
- u8 state;
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
- u8 pin0_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
- u8 pin1_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
- u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
- u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
-struct hwrm_func_ptp_pin_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
- #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
- u8 pin0_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
- u8 pin0_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
- u8 pin1_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
- u8 pin1_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
- u8 pin2_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
- u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
- u8 pin3_state;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
- u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
- u8 unused_0[4];
-};
-
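The `enables` bits in the pin-configuration request are laid out regularly: pin N's STATE enable is bit 2*N and its USAGE enable is bit 2*N+1. A sketch exploiting that pattern (hypothetical helper, not from the driver):

	static u32 bnxt_ptp_pin_enables(int pin)
	{
		/* (0x1 | 0x2) << (2 * pin) yields 0x3, 0xc, 0x30, 0xc0
		 * for pins 0..3, covering both enable bits of one pin
		 */
		return (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
			FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (2 * pin);
	}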
-/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_pin_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_cfg_input (size:320b/40B) */
-struct hwrm_func_ptp_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 enables;
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
- #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
- u8 ptp_pps_event;
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
- u8 ptp_freq_adj_dll_source;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
- u8 ptp_freq_adj_dll_phase;
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
- u8 unused_0[3];
- __le32 ptp_freq_adj_ext_period;
- __le32 ptp_freq_adj_ext_up;
- __le32 ptp_freq_adj_ext_phase_lower;
- __le32 ptp_freq_adj_ext_phase_upper;
-};
-
-/* hwrm_func_ptp_cfg_output (size:128b/16B) */
-struct hwrm_func_ptp_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
-struct hwrm_func_ptp_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
- #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
-struct hwrm_func_ptp_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 pps_event_ts;
- __le64 ptm_res_local_ts;
- __le64 ptm_pmstr_ts;
- __le32 ptm_mstr_prop_dly;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_func_drv_if_change_input (size:192b/24B) */
-struct hwrm_func_drv_if_change_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
- __le32 unused;
-};
-
-/* hwrm_func_drv_if_change_output (size:128b/16B) */
-struct hwrm_func_drv_if_change_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
- u8 unused_0[3];
- u8 valid;
-};
-
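As the structs above show, every HWRM `*_input` opens with the same header (`req_type`/`cmpl_ring`/`seq_id`/`target_id`/`resp_addr`) and every `*_output` opens with `error_code`/`req_type`/`seq_id`/`resp_len` and ends with a `valid` byte. Assuming firmware writes that trailing byte last, a completion poll can check it; a hedged sketch, with the helper name invented here:

	static bool hwrm_resp_complete(const u8 *resp, u16 resp_len)
	{
		/* valid is the final byte of every response layout above */
		return resp[resp_len - 1] != 0;
	}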
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
-struct hwrm_port_phy_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
- __le32 enables;
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
- __le16 port_id;
- __le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
- u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
- u8 auto_pause;
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
- __le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
- #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
- u8 unused_1;
- __le32 preemphasis;
- __le16 eee_link_speed_mask;
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
- __le32 tx_lpi_timer;
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le16 auto_link_pam4_speed_mask;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- u8 unused_2[2];
-};
-
-/* hwrm_port_phy_cfg_output (size:128b/16B) */
-struct hwrm_port_phy_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
-struct hwrm_port_phy_cfg_cmd_err {
- u8 code;
- #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
- #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
- u8 unused_0[7];
-};
-
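The force/auto link-speed codes are the speed in units of 100 Mb/s (1GB = 0xa = 10, 100GB = 0x3e8 = 1000), with 0xffff reserved as a sentinel for 10 Mb/s. A conversion sketch under that encoding; the helper name is illustrative:

	static u32 bnxt_fw_speed_to_mbps(u16 fw_speed)
	{
		if (fw_speed == PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB)
			return 10;	/* 0xffff sentinel, not 0xffff * 100 */
		return (u32)fw_speed * 100;
	}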
-/* hwrm_port_phy_qcfg_input (size:192b/24B) */
-struct hwrm_port_phy_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
-struct hwrm_port_phy_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 active_fec_signal_mode;
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
- #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
- __le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
- u8 duplex_cfg;
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
- u8 pause;
- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
- __le16 support_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
- __le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
- u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
- u8 auto_pause;
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- __le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
- __le16 auto_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
- u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
- #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
- u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
- #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
- u8 force_pause;
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
- u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
- u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
- u8 eee_config_phy_addr;
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
- u8 parallel_detect;
- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
- __le16 link_partner_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
- u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
- u8 link_partner_adv_pause;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- __le16 adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le16 link_partner_adv_eee_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
- __le32 xcvr_identifier_type_tx_lpi_timer;
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
- __le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
- u8 duplex_state;
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
- u8 option_flags;
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
- #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- __le16 support_pam4_speeds;
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
- __le16 force_pam4_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
- #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
- __le16 auto_pam4_link_speed_mask;
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
- #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- u8 link_partner_pam4_adv_speeds;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 valid;
-};
-
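`active_fec_signal_mode` in the PHY query response packs two fields: the signal mode (NRZ/PAM4) in the low nibble and the active FEC mode in the high nibble. A decode sketch using the masks defined above (helper name assumed):

	static void bnxt_decode_fec_sig(u8 val, u8 *sig_mode, u8 *active_fec)
	{
		*sig_mode   = val & PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
		*active_fec = (val & PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK) >>
			      PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT;
	}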
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
-struct hwrm_port_mac_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
- __le32 enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
- #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
- __le16 port_id;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- u8 vlan_pri2cos_map_pri;
- u8 reserved1;
- u8 tunnel_pri2cos_map_pri;
- u8 dscp2pri_map_pri;
- __le16 rx_ts_capture_ptp_msg_type;
- __le16 tx_ts_capture_ptp_msg_type;
- u8 cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- u8 unused_0[3];
- __le32 ptp_freq_adj_ppb;
- __le32 ptp_adj_phase;
-};
-
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- __le16 mtu;
- u8 ipg;
- u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
-struct hwrm_port_mac_ptp_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
- u8 unused_0[3];
- __le32 rx_ts_reg_off_lower;
- __le32 rx_ts_reg_off_upper;
- __le32 rx_ts_reg_off_seq_id;
- __le32 rx_ts_reg_off_src_id_0;
- __le32 rx_ts_reg_off_src_id_1;
- __le32 rx_ts_reg_off_src_id_2;
- __le32 rx_ts_reg_off_domain_id;
- __le32 rx_ts_reg_off_fifo;
- __le32 rx_ts_reg_off_fifo_adv;
- __le32 rx_ts_reg_off_granularity;
- __le32 tx_ts_reg_off_lower;
- __le32 tx_ts_reg_off_upper;
- __le32 tx_ts_reg_off_seq_id;
- __le32 tx_ts_reg_off_fifo;
- __le32 tx_ts_reg_off_granularity;
- __le32 ts_ref_clock_reg_lower;
- __le32 ts_ref_clock_reg_upper;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* tx_port_stats (size:3264b/408B) */
-struct tx_port_stats {
- __le64 tx_64b_frames;
- __le64 tx_65b_127b_frames;
- __le64 tx_128b_255b_frames;
- __le64 tx_256b_511b_frames;
- __le64 tx_512b_1023b_frames;
- __le64 tx_1024b_1518b_frames;
- __le64 tx_good_vlan_frames;
- __le64 tx_1519b_2047b_frames;
- __le64 tx_2048b_4095b_frames;
- __le64 tx_4096b_9216b_frames;
- __le64 tx_9217b_16383b_frames;
- __le64 tx_good_frames;
- __le64 tx_total_frames;
- __le64 tx_ucast_frames;
- __le64 tx_mcast_frames;
- __le64 tx_bcast_frames;
- __le64 tx_pause_frames;
- __le64 tx_pfc_frames;
- __le64 tx_jabber_frames;
- __le64 tx_fcs_err_frames;
- __le64 tx_control_frames;
- __le64 tx_oversz_frames;
- __le64 tx_single_dfrl_frames;
- __le64 tx_multi_dfrl_frames;
- __le64 tx_single_coll_frames;
- __le64 tx_multi_coll_frames;
- __le64 tx_late_coll_frames;
- __le64 tx_excessive_coll_frames;
- __le64 tx_frag_frames;
- __le64 tx_err;
- __le64 tx_tagged_frames;
- __le64 tx_dbl_tagged_frames;
- __le64 tx_runt_frames;
- __le64 tx_fifo_underruns;
- __le64 tx_pfc_ena_frames_pri0;
- __le64 tx_pfc_ena_frames_pri1;
- __le64 tx_pfc_ena_frames_pri2;
- __le64 tx_pfc_ena_frames_pri3;
- __le64 tx_pfc_ena_frames_pri4;
- __le64 tx_pfc_ena_frames_pri5;
- __le64 tx_pfc_ena_frames_pri6;
- __le64 tx_pfc_ena_frames_pri7;
- __le64 tx_eee_lpi_events;
- __le64 tx_eee_lpi_duration;
- __le64 tx_llfc_logical_msgs;
- __le64 tx_hcfc_msgs;
- __le64 tx_total_collisions;
- __le64 tx_bytes;
- __le64 tx_xthol_frames;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
-};
-
-/* rx_port_stats (size:4224b/528B) */
-struct rx_port_stats {
- __le64 rx_64b_frames;
- __le64 rx_65b_127b_frames;
- __le64 rx_128b_255b_frames;
- __le64 rx_256b_511b_frames;
- __le64 rx_512b_1023b_frames;
- __le64 rx_1024b_1518b_frames;
- __le64 rx_good_vlan_frames;
- __le64 rx_1519b_2047b_frames;
- __le64 rx_2048b_4095b_frames;
- __le64 rx_4096b_9216b_frames;
- __le64 rx_9217b_16383b_frames;
- __le64 rx_total_frames;
- __le64 rx_ucast_frames;
- __le64 rx_mcast_frames;
- __le64 rx_bcast_frames;
- __le64 rx_fcs_err_frames;
- __le64 rx_ctrl_frames;
- __le64 rx_pause_frames;
- __le64 rx_pfc_frames;
- __le64 rx_unsupported_opcode_frames;
- __le64 rx_unsupported_da_pausepfc_frames;
- __le64 rx_wrong_sa_frames;
- __le64 rx_align_err_frames;
- __le64 rx_oor_len_frames;
- __le64 rx_code_err_frames;
- __le64 rx_false_carrier_frames;
- __le64 rx_ovrsz_frames;
- __le64 rx_jbr_frames;
- __le64 rx_mtu_err_frames;
- __le64 rx_match_crc_frames;
- __le64 rx_promiscuous_frames;
- __le64 rx_tagged_frames;
- __le64 rx_double_tagged_frames;
- __le64 rx_trunc_frames;
- __le64 rx_good_frames;
- __le64 rx_pfc_xon2xoff_frames_pri0;
- __le64 rx_pfc_xon2xoff_frames_pri1;
- __le64 rx_pfc_xon2xoff_frames_pri2;
- __le64 rx_pfc_xon2xoff_frames_pri3;
- __le64 rx_pfc_xon2xoff_frames_pri4;
- __le64 rx_pfc_xon2xoff_frames_pri5;
- __le64 rx_pfc_xon2xoff_frames_pri6;
- __le64 rx_pfc_xon2xoff_frames_pri7;
- __le64 rx_pfc_ena_frames_pri0;
- __le64 rx_pfc_ena_frames_pri1;
- __le64 rx_pfc_ena_frames_pri2;
- __le64 rx_pfc_ena_frames_pri3;
- __le64 rx_pfc_ena_frames_pri4;
- __le64 rx_pfc_ena_frames_pri5;
- __le64 rx_pfc_ena_frames_pri6;
- __le64 rx_pfc_ena_frames_pri7;
- __le64 rx_sch_crc_err_frames;
- __le64 rx_undrsz_frames;
- __le64 rx_frag_frames;
- __le64 rx_eee_lpi_events;
- __le64 rx_eee_lpi_duration;
- __le64 rx_llfc_physical_msgs;
- __le64 rx_llfc_logical_msgs;
- __le64 rx_llfc_msgs_with_crc_err;
- __le64 rx_hcfc_msgs;
- __le64 rx_hcfc_msgs_with_crc_err;
- __le64 rx_bytes;
- __le64 rx_runt_bytes;
- __le64 rx_runt_frames;
- __le64 rx_stat_discard;
- __le64 rx_stat_err;
-};
-
-/* hwrm_port_qstats_input (size:320b/40B) */
-struct hwrm_port_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[5];
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_output (size:128b/16B) */
-struct hwrm_port_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* tx_port_stats_ext (size:2048b/256B) */
-struct tx_port_stats_ext {
- __le64 tx_bytes_cos0;
- __le64 tx_bytes_cos1;
- __le64 tx_bytes_cos2;
- __le64 tx_bytes_cos3;
- __le64 tx_bytes_cos4;
- __le64 tx_bytes_cos5;
- __le64 tx_bytes_cos6;
- __le64 tx_bytes_cos7;
- __le64 tx_packets_cos0;
- __le64 tx_packets_cos1;
- __le64 tx_packets_cos2;
- __le64 tx_packets_cos3;
- __le64 tx_packets_cos4;
- __le64 tx_packets_cos5;
- __le64 tx_packets_cos6;
- __le64 tx_packets_cos7;
- __le64 pfc_pri0_tx_duration_us;
- __le64 pfc_pri0_tx_transitions;
- __le64 pfc_pri1_tx_duration_us;
- __le64 pfc_pri1_tx_transitions;
- __le64 pfc_pri2_tx_duration_us;
- __le64 pfc_pri2_tx_transitions;
- __le64 pfc_pri3_tx_duration_us;
- __le64 pfc_pri3_tx_transitions;
- __le64 pfc_pri4_tx_duration_us;
- __le64 pfc_pri4_tx_transitions;
- __le64 pfc_pri5_tx_duration_us;
- __le64 pfc_pri5_tx_transitions;
- __le64 pfc_pri6_tx_duration_us;
- __le64 pfc_pri6_tx_transitions;
- __le64 pfc_pri7_tx_duration_us;
- __le64 pfc_pri7_tx_transitions;
-};
-
-/* rx_port_stats_ext (size:3648b/456B) */
-struct rx_port_stats_ext {
- __le64 link_down_events;
- __le64 continuous_pause_events;
- __le64 resume_pause_events;
- __le64 continuous_roce_pause_events;
- __le64 resume_roce_pause_events;
- __le64 rx_bytes_cos0;
- __le64 rx_bytes_cos1;
- __le64 rx_bytes_cos2;
- __le64 rx_bytes_cos3;
- __le64 rx_bytes_cos4;
- __le64 rx_bytes_cos5;
- __le64 rx_bytes_cos6;
- __le64 rx_bytes_cos7;
- __le64 rx_packets_cos0;
- __le64 rx_packets_cos1;
- __le64 rx_packets_cos2;
- __le64 rx_packets_cos3;
- __le64 rx_packets_cos4;
- __le64 rx_packets_cos5;
- __le64 rx_packets_cos6;
- __le64 rx_packets_cos7;
- __le64 pfc_pri0_rx_duration_us;
- __le64 pfc_pri0_rx_transitions;
- __le64 pfc_pri1_rx_duration_us;
- __le64 pfc_pri1_rx_transitions;
- __le64 pfc_pri2_rx_duration_us;
- __le64 pfc_pri2_rx_transitions;
- __le64 pfc_pri3_rx_duration_us;
- __le64 pfc_pri3_rx_transitions;
- __le64 pfc_pri4_rx_duration_us;
- __le64 pfc_pri4_rx_transitions;
- __le64 pfc_pri5_rx_duration_us;
- __le64 pfc_pri5_rx_transitions;
- __le64 pfc_pri6_rx_duration_us;
- __le64 pfc_pri6_rx_transitions;
- __le64 pfc_pri7_rx_duration_us;
- __le64 pfc_pri7_rx_transitions;
- __le64 rx_bits;
- __le64 rx_buffer_passed_threshold;
- __le64 rx_pcs_symbol_err;
- __le64 rx_corrected_bits;
- __le64 rx_discard_bytes_cos0;
- __le64 rx_discard_bytes_cos1;
- __le64 rx_discard_bytes_cos2;
- __le64 rx_discard_bytes_cos3;
- __le64 rx_discard_bytes_cos4;
- __le64 rx_discard_bytes_cos5;
- __le64 rx_discard_bytes_cos6;
- __le64 rx_discard_bytes_cos7;
- __le64 rx_discard_packets_cos0;
- __le64 rx_discard_packets_cos1;
- __le64 rx_discard_packets_cos2;
- __le64 rx_discard_packets_cos3;
- __le64 rx_discard_packets_cos4;
- __le64 rx_discard_packets_cos5;
- __le64 rx_discard_packets_cos6;
- __le64 rx_discard_packets_cos7;
-};
-
-/* hwrm_port_qstats_ext_input (size:320b/40B) */
-struct hwrm_port_qstats_ext_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
- u8 unused_0;
- __le64 tx_stat_host_addr;
- __le64 rx_stat_host_addr;
-};
-
-/* hwrm_port_qstats_ext_output (size:128b/16B) */
-struct hwrm_port_qstats_ext_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tx_stat_size;
- __le16 rx_stat_size;
- __le16 total_active_cos_queues;
- u8 flags;
- #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
- u8 valid;
-};
-
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
-struct hwrm_port_lpbk_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
-struct hwrm_port_lpbk_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 lpbk_ucast_frames;
- __le64 lpbk_mcast_frames;
- __le64 lpbk_bcast_frames;
- __le64 lpbk_ucast_bytes;
- __le64 lpbk_mcast_bytes;
- __le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ecn_qstats_input (size:256b/32B) */
-struct hwrm_port_ecn_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 ecn_stat_buf_size;
- u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
- u8 unused_0[3];
- __le64 ecn_stat_host_addr;
-};
-
-/* hwrm_port_ecn_qstats_output (size:128b/16B) */
-struct hwrm_port_ecn_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ecn_stat_buf_size;
- u8 mark_en;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* port_stats_ecn (size:512b/64B) */
-struct port_stats_ecn {
- __le64 mark_cnt_cos0;
- __le64 mark_cnt_cos1;
- __le64 mark_cnt_cos2;
- __le64 mark_cnt_cos3;
- __le64 mark_cnt_cos4;
- __le64 mark_cnt_cos5;
- __le64 mark_cnt_cos6;
- __le64 mark_cnt_cos7;
-};
-
-/* hwrm_port_clr_stats_input (size:192b/24B) */
-struct hwrm_port_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 flags;
- #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
- u8 unused_0[5];
-};
-
-/* hwrm_port_clr_stats_output (size:128b/16B) */
-struct hwrm_port_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
-struct hwrm_port_lpbk_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_ts_query_input (size:320b/40B) */
-struct hwrm_port_ts_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
- #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
- #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
- __le16 port_id;
- u8 unused_0[2];
- __le16 enables;
- #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
- #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
- __le16 ts_req_timeout;
- __le32 ptp_seq_id;
- __le16 ptp_hdr_offset;
- u8 unused_1[6];
-};
-
-/* hwrm_port_ts_query_output (size:192b/24B) */
-struct hwrm_port_ts_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ptp_msg_ts;
- __le16 ptp_msg_seqid;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_phy_qcaps_input (size:192b/24B) */
-struct hwrm_port_phy_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_phy_qcaps_output (size:256b/32B) */
-struct hwrm_port_phy_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
- u8 port_cnt;
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
- __le16 supported_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
- __le16 supported_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
- __le16 supported_speeds_eee_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
- __le32 tx_lpi_timer_low;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
- __le32 valid_tx_lpi_timer_high;
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
- #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
- #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
- __le16 supported_pam4_speeds_auto_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
- __le16 supported_pam4_speeds_force_mode;
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
- #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
-struct hwrm_port_phy_i2c_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
- __le16 port_id;
- u8 i2c_slave_addr;
- u8 unused_0;
- __le16 page_number;
- __le16 page_offset;
- u8 data_length;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
-struct hwrm_port_phy_i2c_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 data[16];
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
-struct hwrm_port_phy_mdio_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- __le16 reg_data;
- u8 cl45_mdio;
- u8 unused_1[7];
-};
-
-/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
-struct hwrm_port_phy_mdio_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[2];
- __le16 port_id;
- u8 phy_addr;
- u8 dev_addr;
- __le16 reg_addr;
- u8 cl45_mdio;
- u8 unused_1;
-};
-
-/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
-struct hwrm_port_phy_mdio_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 reg_data;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_port_led_cfg_input (size:512b/64B) */
-struct hwrm_port_led_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
- __le16 port_id;
- u8 num_leds;
- u8 rsvd;
- u8 led0_id;
- u8 led0_state;
- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 rsvd0;
- u8 led1_id;
- u8 led1_state;
- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 rsvd1;
- u8 led2_id;
- u8 led2_state;
- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 rsvd2;
- u8 led3_id;
- u8 led3_state;
- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 rsvd3;
-};
-
-/* hwrm_port_led_cfg_output (size:128b/16B) */
-struct hwrm_port_led_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_port_led_qcfg_input (size:192b/24B) */
-struct hwrm_port_led_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcfg_output (size:448b/56B) */
-struct hwrm_port_led_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
- u8 led0_state;
- #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
- u8 led0_color;
- #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
- u8 unused_0;
- __le16 led0_blink_on;
- __le16 led0_blink_off;
- u8 led0_group_id;
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
- u8 led1_state;
- #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
- u8 led1_color;
- #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
- u8 unused_1;
- __le16 led1_blink_on;
- __le16 led1_blink_off;
- u8 led1_group_id;
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
- u8 led2_state;
- #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
- u8 led2_color;
- #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
- u8 unused_2;
- __le16 led2_blink_on;
- __le16 led2_blink_off;
- u8 led2_group_id;
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
- u8 led3_state;
- #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
- #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
- u8 led3_color;
- #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
- #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
- u8 unused_3;
- __le16 led3_blink_on;
- __le16 led3_blink_off;
- u8 led3_group_id;
- u8 unused_4[6];
- u8 valid;
-};
-
-/* hwrm_port_led_qcaps_input (size:192b/24B) */
-struct hwrm_port_led_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_port_led_qcaps_output (size:384b/48B) */
-struct hwrm_port_led_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_leds;
- u8 unused[3];
- u8 led0_id;
- u8 led0_type;
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
- u8 led0_group_id;
- u8 unused_0;
- __le16 led0_state_caps;
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led1_id;
- u8 led1_type;
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
- u8 led1_group_id;
- u8 unused_1;
- __le16 led1_state_caps;
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led2_id;
- u8 led2_type;
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
- u8 led2_group_id;
- u8 unused_2;
- __le16 led2_state_caps;
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 led3_id;
- u8 led3_type;
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
- #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
- u8 led3_group_id;
- u8 unused_3;
- __le16 led3_state_caps;
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
- __le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
- u8 unused_4[3];
- u8 valid;
-};
-
-/* hwrm_queue_qportcfg_input (size:192b/24B) */
-struct hwrm_queue_qportcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
- __le16 port_id;
- u8 drv_qmap_cap;
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
- #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
- u8 unused_0;
-};
-
-/* hwrm_queue_qportcfg_output (size:1344b/168B) */
-struct hwrm_queue_qportcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 max_configurable_queues;
- u8 max_configurable_lossless_queues;
- u8 queue_cfg_allowed;
- u8 queue_cfg_info;
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
- u8 queue_pfcenable_cfg_allowed;
- u8 queue_pri2cos_cfg_allowed;
- u8 queue_cos2bw_cfg_allowed;
- u8 queue_id0;
- u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
- u8 queue_id1;
- u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
- u8 queue_id2;
- u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
- u8 queue_id3;
- u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
- u8 queue_id4;
- u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
- u8 queue_id5;
- u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
- u8 queue_id6;
- u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
- u8 queue_id7;
- u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 queue_id0_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
- char qid0_name[16];
- char qid1_name[16];
- char qid2_name[16];
- char qid3_name[16];
- char qid4_name[16];
- char qid5_name[16];
- char qid6_name[16];
- char qid7_name[16];
- u8 queue_id1_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id2_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id3_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id4_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id5_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id6_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 queue_id7_service_profile_type;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
- u8 valid;
-};
-
-/* hwrm_queue_qcfg_input (size:192b/24B) */
-struct hwrm_queue_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
- __le32 queue_id;
-};
-
-/* hwrm_queue_qcfg_output (size:128b/16B) */
-struct hwrm_queue_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 queue_len;
- u8 service_profile;
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
- u8 queue_cfg_info;
- #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_queue_cfg_input (size:320b/40B) */
-struct hwrm_queue_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
- __le32 enables;
- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
- __le32 queue_id;
- __le32 dflt_len;
- u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
- u8 unused_0[7];
-};
-
-/* hwrm_queue_cfg_output (size:128b/16B) */
-struct hwrm_queue_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
-struct hwrm_queue_pfcenable_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
- #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
- __le16 port_id;
- u8 unused_0[2];
-};
-
-/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
-struct hwrm_queue_pfcenable_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
- u8 port_id;
- u8 unused_0[3];
-};
-
-/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
-struct hwrm_queue_pri2cos_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 queue_cfg_info;
- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
-struct hwrm_queue_pri2cos_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
- __le32 enables;
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
- u8 port_id;
- u8 pri0_cos_queue_id;
- u8 pri1_cos_queue_id;
- u8 pri2_cos_queue_id;
- u8 pri3_cos_queue_id;
- u8 pri4_cos_queue_id;
- u8 pri5_cos_queue_id;
- u8 pri6_cos_queue_id;
- u8 pri7_cos_queue_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
-struct hwrm_queue_pri2cos_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
-struct hwrm_queue_cos2bw_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
-};
-
-/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
-struct hwrm_queue_cos2bw_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 queue_id0;
- u8 unused_0;
- __le16 unused_1;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_2[4];
- u8 valid;
-};
-
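Every min_bw/max_bw word in these structures shares one 32-bit layout: bits 0-27 hold the bandwidth value, bit 28 is the bits-versus-bytes scale flag, and bits 29-31 encode the unit, with 0x7 meaning no limit is configured. A minimal decode sketch (not part of the patch), using the queue-ID2 macros above as a representative instance; the helper name is illustrative:

static void bnxt_decode_min_bw(__le32 raw)
{
	u32 bw = le32_to_cpu(raw);	/* wire format is little-endian */
	u32 value = (bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK) >>
		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT;
	bool bytes = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE;	/* set: bytes, clear: bits */
	u32 unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK;

	if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID)
		return;	/* no minimum bandwidth configured for this queue */
	/* e.g. _UNIT_MEGA with the scale flag clear means "value" Mbit/s */
}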
-/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
-struct hwrm_queue_cos2bw_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
- __le16 port_id;
- u8 queue_id0;
- u8 unused_0;
- __le32 queue_id0_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id0_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id0_pri_lvl;
- u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
- u8 unused_1[5];
-};
-
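The cfg request writes the same bandwidth layout in the other direction, and firmware only applies the per-queue fields whose COS_QUEUE_IDn_VALID bit is set in enables. A hedged sketch configuring queue 0 with a 5 Gbit/s minimum expressed in megabits (req is assumed to point at a zeroed struct hwrm_queue_cos2bw_cfg_input prepared by the driver's usual request machinery):

req->enables = cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID);
req->port_id = cpu_to_le16(port_id);
req->queue_id0 = 0;
req->queue_id0_min_bw =
	cpu_to_le32((5000 << QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) |
		    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS |
		    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA);
req->queue_id0_tsa_assign = QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS;
req->queue_id0_bw_weight = 25;	/* ETS weight relative to the other enabled queues */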
-/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
-struct hwrm_queue_cos2bw_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
-struct hwrm_queue_dscp_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 port_id;
- u8 unused_0[7];
-};
-
-/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
-struct hwrm_queue_dscp_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_dscp_bits;
- u8 unused_0;
- __le16 max_entries;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
-struct hwrm_queue_dscp2pri_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- u8 port_id;
- u8 unused_0;
- __le16 dest_data_buffer_size;
- u8 unused_1[4];
-};
-
-/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 entry_cnt;
- u8 default_pri;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
-struct hwrm_queue_dscp2pri_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le32 flags;
- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
- __le32 enables;
- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
- u8 port_id;
- u8 default_pri;
- __le16 entry_cnt;
- u8 unused_0[4];
-};
-
-/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
-struct hwrm_queue_dscp2pri_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
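hwrm_queue_dscp2pri_cfg splits its payload: the scalar knobs travel in the request itself, while the DSCP-to-priority entries live in a host DMA buffer referenced by src_data_addr. A hedged sketch of the request side only (map_dma and n are assumptions standing in for a previously prepared mapping table and its entry count):

req->port_id = port_id;
req->src_data_addr = cpu_to_le64(map_dma);	/* DMA buffer holding n mapping entries */
req->entry_cnt = cpu_to_le16(n);
req->enables = cpu_to_le32(QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI);
req->default_pri = 0;	/* priority for DSCP values not covered by the table */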
-/* hwrm_vnic_alloc_input (size:192b/24B) */
-struct hwrm_vnic_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
- __le16 virtio_net_fid;
- u8 unused_0[2];
-};
-
-/* hwrm_vnic_alloc_output (size:128b/16B) */
-struct hwrm_vnic_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 vnic_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_vnic_free_input (size:192b/24B) */
-struct hwrm_vnic_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_free_output (size:128b/16B) */
-struct hwrm_vnic_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_cfg_input (size:384b/48B) */
-struct hwrm_vnic_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
- __le32 enables;
- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
- #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
- #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
- #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
- __le16 vnic_id;
- __le16 dflt_ring_grp;
- __le16 rss_rule;
- __le16 cos_rule;
- __le16 lb_rule;
- __le16 mru;
- __le16 default_rx_ring_id;
- __le16 default_cmpl_ring_id;
- __le16 queue_id;
- u8 rx_csum_v2_mode;
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
- #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 unused0[5];
-};
-
-/* hwrm_vnic_cfg_output (size:128b/16B) */
-struct hwrm_vnic_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
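hwrm_vnic_cfg_input follows the usual HWRM enables convention: firmware reads an optional field only when its matching VNIC_CFG_REQ_ENABLES_* bit is set, so everything else can stay zero. A minimal sketch updating just the MRU of an existing VNIC (vnic_id is assumed to come from an earlier hwrm_vnic_alloc response):

req->vnic_id = cpu_to_le16(vnic_id);
req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_MRU);
req->mru = cpu_to_le16(9500);	/* only this field is consumed; the rest are ignored */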
-/* hwrm_vnic_qcaps_input (size:192b/24B) */
-struct hwrm_vnic_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_qcaps_output (size:192b/24B) */
-struct hwrm_vnic_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 mru;
- u8 unused_0[2];
- __le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- __le16 max_aggs_supported;
- u8 unused_1[5];
- u8 valid;
-};
-
-/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
-struct hwrm_vnic_tpa_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
- __le32 enables;
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
- __le16 vnic_id;
- __le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
- u8 unused_0[2];
- __le32 max_agg_timer;
- __le32 min_agg_len;
-};
-
-/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
-struct hwrm_vnic_tpa_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
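Note that max_agg_segs and max_aggs are log2-encoded: VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 is 0x3 because it stands for 2^3 segments. A sketch of the conversion, assuming the requested count is a power of two (the wrapper name is illustrative; ilog2() and min_t() are standard kernel helpers):

static u16 bnxt_tpa_segs_code(unsigned int segs)
{
	/* 1 -> 0x0, 2 -> 0x1, 4 -> 0x2, 8 -> 0x3, capped at the _MAX code */
	return min_t(unsigned int, ilog2(segs), VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX);
}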
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vnic_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
- #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
- __le16 max_agg_segs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
- #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
- __le16 max_aggs;
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
- #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
- __le32 max_agg_timer;
- __le32 min_agg_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
-struct hwrm_vnic_rss_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
- __le16 vnic_id;
- u8 ring_table_pair_index;
- u8 hash_mode_flags;
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
- __le64 ring_grp_tbl_addr;
- __le64 hash_key_tbl_addr;
- __le16 rss_ctx_idx;
- u8 unused_1[6];
-};
-
-/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
-struct hwrm_vnic_rss_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_rss_cfg_cmd_err {
- u8 code;
- #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
- u8 unused_0[7];
-};
-
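The hash_type bits OR together, one per flow type. A hedged sketch enabling 4-tuple hashing for TCP with a 2-tuple fallback for other IP traffic (vnic_id, ctx_id and the two DMA addresses are assumptions from earlier allocation steps):

req->vnic_id = cpu_to_le16(vnic_id);
req->rss_ctx_idx = cpu_to_le16(ctx_id);		/* from hwrm_vnic_rss_cos_lb_ctx_alloc below */
req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
req->ring_grp_tbl_addr = cpu_to_le64(tbl_dma);	/* DMA address of the RSS indirection table */
req->hash_key_tbl_addr = cpu_to_le64(key_dma);	/* DMA address of the hash key */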
-/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
-struct hwrm_vnic_plcmodes_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
- #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
- __le32 enables;
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
- #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
- __le32 vnic_id;
- __le16 jumbo_thresh;
- __le16 hds_offset;
- __le16 hds_threshold;
- __le16 max_bds;
- u8 unused_0[4];
-};
-
-/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
-struct hwrm_vnic_plcmodes_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
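hwrm_vnic_plcmodes_cfg is where header-data split is switched on: the FLAGS_HDS_* bits pick which protocols are split, and the threshold fields (each gated by its enables bit) tune the placement. A hedged sketch with field semantics inferred from the names; vnic_id and rx_copy_thresh are assumptions:

req->vnic_id = cpu_to_le32(vnic_id);	/* note: __le32 here, unlike hwrm_vnic_cfg */
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
			 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
			 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			   VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
req->jumbo_thresh = cpu_to_le16(rx_copy_thresh);
req->hds_threshold = cpu_to_le16(256);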
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 rss_cos_lb_ctx_id;
- u8 unused_0[6];
-};
-
-/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_alloc_input (size:704b/88B) */
-struct hwrm_ring_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
- #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0;
- __le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
- __le64 page_tbl_addr;
- __le32 fbo;
- u8 page_size;
- u8 page_tbl_depth;
- __le16 schq_id;
- __le32 length;
- __le16 logical_id;
- __le16 cmpl_ring_id;
- __le16 queue_id;
- __le16 rx_buf_size;
- __le16 rx_ring_id;
- __le16 nq_ring_id;
- __le16 ring_arb_cfg;
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 unused_3;
- __le32 reserved3;
- __le32 stat_ctx_id;
- __le32 reserved4;
- __le32 max_bw;
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
- #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 mpc_chnls_type;
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
- #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
- __le64 cq_handle;
-};
-
-/* hwrm_ring_alloc_output (size:128b/16B) */
-struct hwrm_ring_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 ring_id;
- __le16 logical_ring_id;
- u8 push_buffer_index;
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[2];
- u8 valid;
-};
-
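Which hwrm_ring_alloc_input fields matter depends on ring_type plus the enables bits. A hedged sketch for a TX ring (page_tbl_dma, ring_entries, cmpl_ring_id and queue_id are assumptions standing in for values produced by earlier setup):

req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
req->page_tbl_addr = cpu_to_le64(page_tbl_dma);	/* DMA address of the ring's page table */
req->length = cpu_to_le32(ring_entries);	/* ring size in entries */
req->cmpl_ring_id = cpu_to_le16(cmpl_ring_id);	/* completion ring that reports for this ring */
req->queue_id = cpu_to_le16(queue_id);		/* CoS queue, tying back to the cos2bw setup above */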
-/* hwrm_ring_free_input (size:256b/32B) */
-struct hwrm_ring_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
- #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
- #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 flags;
- #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
- #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
- __le16 ring_id;
- __le32 prod_idx;
- __le32 opaque;
- __le32 unused_1;
-};
-
-/* hwrm_ring_free_output (size:128b/16B) */
-struct hwrm_ring_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_reset_input (size:192b/24B) */
-struct hwrm_ring_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
- #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
- #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
- u8 unused_0;
- __le16 ring_id;
- u8 unused_1[4];
-};
-
-/* hwrm_ring_reset_output (size:128b/16B) */
-struct hwrm_ring_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 push_buffer_index;
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
- #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
- u8 unused_0[3];
- u8 consumer_idx[3];
- u8 valid;
-};
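
The 24-bit consumer_idx above is stored as three raw bytes. A minimal sketch of assembling it, assuming the bytes follow the little-endian convention of the surrounding HWRM structures (the header itself does not state the byte order, and the helper name is illustrative):

static u32 ring_reset_consumer_idx(const struct hwrm_ring_reset_output *resp)
{
	/* Assumption: least-significant byte first, matching the
	 * little-endian layout of the rest of the HWRM messages.
	 */
	return resp->consumer_idx[0] |
	       (resp->consumer_idx[1] << 8) |
	       (resp->consumer_idx[2] << 16);
}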
-
-/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
-struct hwrm_ring_aggint_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
-struct hwrm_ring_aggint_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 cmpl_params;
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
- #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
- __le32 nq_params;
- #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
- __le16 num_cmpl_dma_aggr_min;
- __le16 num_cmpl_dma_aggr_max;
- __le16 num_cmpl_dma_aggr_during_int_min;
- __le16 num_cmpl_dma_aggr_during_int_max;
- __le16 cmpl_aggr_dma_tmr_min;
- __le16 cmpl_aggr_dma_tmr_max;
- __le16 cmpl_aggr_dma_tmr_during_int_min;
- __le16 cmpl_aggr_dma_tmr_during_int_max;
- __le16 int_lat_tmr_min_min;
- __le16 int_lat_tmr_min_max;
- __le16 int_lat_tmr_max_min;
- __le16 int_lat_tmr_max_max;
- __le16 num_cmpl_aggr_int_min;
- __le16 num_cmpl_aggr_int_max;
- __le16 timer_units;
- u8 unused_0[1];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
- #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
-struct hwrm_ring_cmpl_ring_qaggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flags;
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 ring_id;
- __le16 flags;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
- __le16 num_cmpl_dma_aggr;
- __le16 num_cmpl_dma_aggr_during_int;
- __le16 cmpl_aggr_dma_tmr;
- __le16 cmpl_aggr_dma_tmr_during_int;
- __le16 int_lat_tmr_min;
- __le16 int_lat_tmr_max;
- __le16 num_cmpl_aggr_int;
- __le16 enables;
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
- u8 unused_0[4];
-};
-
-/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
-struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_ring_grp_alloc_input (size:192b/24B) */
-struct hwrm_ring_grp_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 cr;
- __le16 rr;
- __le16 ar;
- __le16 sc;
-};
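
The four request fields above are terse. As a hedged sketch, a caller might fill them as below; the field roles (completion ring, RX ring, aggregation ring, statistics context) are inferred from how the bnxt driver uses this message, not stated in this header, and the helper name is illustrative:

static void fill_ring_grp_alloc(struct hwrm_ring_grp_alloc_input *req,
				u16 cmpl_ring_id, u16 rx_ring_id,
				u16 agg_ring_id, u16 stats_ctx_id)
{
	/* req_type/seq_id/resp_addr are assumed to be filled in by the
	 * HWRM request framework before the message is sent.
	 */
	req->cr = cpu_to_le16(cmpl_ring_id);	/* completion ring */
	req->rr = cpu_to_le16(rx_ring_id);	/* RX ring */
	req->ar = cpu_to_le16(agg_ring_id);	/* RX aggregation ring */
	req->sc = cpu_to_le16(stats_ctx_id);	/* statistics context */
}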
-
-/* hwrm_ring_grp_alloc_output (size:128b/16B) */
-struct hwrm_ring_grp_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 ring_group_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_ring_grp_free_input (size:192b/24B) */
-struct hwrm_ring_grp_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 ring_group_id;
- u8 unused_0[4];
-};
-
-/* hwrm_ring_grp_free_output (size:128b/16B) */
-struct hwrm_ring_grp_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
-#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
-#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
-#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
-
-/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
-struct hwrm_cfa_l2_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
- __le32 enables;
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
- u8 l2_addr[6];
- u8 num_vlans;
- u8 t_num_vlans;
- u8 l2_addr_mask[6];
- __le16 l2_ovlan;
- __le16 l2_ovlan_mask;
- __le16 l2_ivlan;
- __le16 l2_ivlan_mask;
- u8 unused_1[2];
- u8 t_l2_addr[6];
- u8 unused_2[2];
- u8 t_l2_addr_mask[6];
- __le16 t_l2_ovlan;
- __le16 t_l2_ovlan_mask;
- __le16 t_l2_ivlan;
- __le16 t_l2_ivlan_mask;
- u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
- u8 unused_3;
- __le32 src_id;
- u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_4;
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
- u8 unused_5;
- __le32 unused_6;
- __le64 l2_filter_id_hint;
-};
-
-/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_l2_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 l2_filter_id;
- __le32 flow_id;
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
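
The flow_id word above packs a 30-bit value together with type and direction bits. A minimal decoding sketch using only the masks defined above (the helper name is ours, not from the driver):

static void decode_l2_flow_id(__le32 flow_id_le)
{
	u32 flow_id = le32_to_cpu(flow_id_le);
	u32 value = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK;
	bool external = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE;
	bool tx = flow_id & CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR;

	pr_debug("flow %u type=%s dir=%s\n", value,
		 external ? "ext" : "int", tx ? "tx" : "rx");
}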
-
-/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_l2_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 l2_filter_id;
-};
-
-/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
-struct hwrm_cfa_l2_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- __le32 enables;
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- __le64 l2_filter_id;
- __le32 dst_id;
- __le32 new_mirror_vnic_id;
-};
-
-/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_l2_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
-struct hwrm_cfa_l2_set_rx_mask_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 vnic_id;
- __le32 mask;
- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
- __le64 mc_tbl_addr;
- __le32 num_mc_entries;
- u8 unused_0[4];
- __le64 vlan_tag_tbl_addr;
- __le32 num_vlan_tags;
- u8 unused_1[4];
-};
-
-/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
-struct hwrm_cfa_l2_set_rx_mask_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- u8 code;
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
- u8 unused_0[7];
-};
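
As a usage sketch for the rx-mask request above (illustrative helper, not driver code), enabling broadcast plus all-multicast reception on one VNIC only requires the vnic_id and the mask bits; the remaining fields may stay zeroed when the corresponding tables are unused:

static void set_rx_mask_bcast_allmcast(struct hwrm_cfa_l2_set_rx_mask_input *req,
				       u32 vnic_id)
{
	/* Header fields (req_type etc.) are assumed to be filled by
	 * the HWRM request framework.
	 */
	req->vnic_id = cpu_to_le32(vnic_id);
	req->mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
				CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
}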
-
-/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
-struct hwrm_cfa_tunnel_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- __le32 enables;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
- __le64 l2_filter_id;
- u8 l2_addr[6];
- __le16 l2_ivlan;
- __le32 l3_addr[4];
- __le32 t_l3_addr[4];
- u8 l3_addr_type;
- u8 t_l3_addr_type;
- u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 tunnel_flags;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
- __le32 vni;
- __le32 dst_vnic_id;
- __le32 mirror_vnic_id;
-};
-
-/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tunnel_filter_id;
- __le32 flow_id;
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_tunnel_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 tunnel_filter_id;
-};
-
-/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_tunnel_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
-struct hwrm_vxlan_ipv4_hdr {
- u8 ver_hlen;
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
- #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
- #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
- u8 tos;
- __be16 ip_id;
- __be16 flags_frag_offset;
- u8 ttl;
- u8 protocol;
- __be32 src_ip_addr;
- __be32 dest_ip_addr;
-};
-
-/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
-struct hwrm_vxlan_ipv6_hdr {
- __be32 ver_tc_flow_label;
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
- #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
- __be16 payload_len;
- u8 next_hdr;
- u8 ttl;
- __be32 src_ip_addr[4];
- __be32 dest_ip_addr[4];
-};
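
Note the SFT values above are expressed in hex (0x1c = 28, 0x14 = 20). A mechanical sketch of packing the big-endian ver_tc_flow_label word from those SFT/MASK pairs (helper name illustrative):

static __be32 vxlan_ipv6_vtf(u8 ver, u8 tc, u32 flow_label)
{
	u32 v = ((u32)ver << VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT) |
		((u32)tc << VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT) |
		(flow_label & VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK);

	/* The on-wire IPv6 header is big-endian. */
	return cpu_to_be32(v);
}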
-
-/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
-struct hwrm_cfa_encap_data_vxlan {
- u8 src_mac_addr[6];
- __le16 unused_0;
- u8 dst_mac_addr[6];
- u8 num_vlan_tags;
- u8 unused_1;
- __be16 ovlan_tpid;
- __be16 ovlan_tci;
- __be16 ivlan_tpid;
- __be16 ivlan_tci;
- __le32 l3[10];
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
- #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
- #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
- __be16 src_port;
- __be16 dst_port;
- __be32 vni;
- u8 hdr_rsvd0[3];
- u8 hdr_rsvd1;
- u8 hdr_flags;
- u8 unused[3];
-};
-
-/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
-struct hwrm_cfa_encap_record_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
- u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6
- u8 unused_0[3];
- __le32 encap_data[20];
-};
-
-/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 encap_record_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
-struct hwrm_cfa_encap_record_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_record_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
-struct hwrm_cfa_encap_record_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
- __le32 enables;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
- __le64 l2_filter_id;
- u8 src_macaddr[6];
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
- __le16 dst_id;
- __le16 mirror_vnic_id;
- u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
- __be32 src_ipaddr[4];
- __be32 src_ipaddr_mask[4];
- __be32 dst_ipaddr[4];
- __be32 dst_ipaddr_mask[4];
- __be16 src_port;
- __be16 src_port_mask;
- __be16 dst_port;
- __be16 dst_port_mask;
- __le64 ntuple_filter_id_hint;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 ntuple_filter_id;
- __le32 flow_id;
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
- u8 unused_0[7];
-};
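
A hedged sketch of building the ntuple allocation request above for a common case, steering an IPv4/TCP flow by destination port; only the enables bits named here are set and the rest of the structure stays zeroed (helper name illustrative, header fields assumed filled by the request framework):

static void fill_ntuple_tcp_dport(struct hwrm_cfa_ntuple_filter_alloc_input *req,
				  __le64 l2_filter_id, u16 dst_id, __be16 dport)
{
	/* l2_filter_id is already little-endian, as returned by
	 * hwrm_cfa_l2_filter_alloc_output.
	 */
	req->l2_filter_id = l2_filter_id;
	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
	req->dst_port = dport;
	req->dst_port_mask = cpu_to_be16(0xffff);
	req->dst_id = cpu_to_le16(dst_id);
	req->enables =
		cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |
			    CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
}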
-
-/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_ntuple_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 ntuple_filter_id;
-};
-
-/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
-struct hwrm_cfa_ntuple_filter_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
- __le32 flags;
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
- #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
- __le64 ntuple_filter_id;
- __le32 new_dst_id;
- __le32 new_mirror_vnic_id;
- __le16 new_meter_instance_id;
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
- #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
- u8 unused_1[6];
-};
-
-/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_ntuple_filter_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
-struct hwrm_cfa_decap_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
- __le32 enables;
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
- __be32 tunnel_id;
- u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
- u8 unused_0;
- __le16 unused_1;
- u8 src_macaddr[6];
- u8 unused_2[2];
- u8 dst_macaddr[6];
- __be16 ovlan_vid;
- __be16 ivlan_vid;
- __be16 t_ovlan_vid;
- __be16 t_ivlan_vid;
- __be16 ethertype;
- u8 ip_addr_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
- u8 ip_protocol;
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
- __le16 unused_3;
- __le32 unused_4;
- __be32 src_ipaddr[4];
- __be32 dst_ipaddr[4];
- __be16 src_port;
- __be16 dst_port;
- __le16 dst_id;
- __le16 l2_ctxt_ref_id;
-};
-
-/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 decap_filter_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_decap_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 decap_filter_id;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_decap_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
-struct hwrm_cfa_flow_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
- __le16 src_fid;
- __le32 tunnel_handle;
- __le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
- __le16 dst_fid;
- __be16 l2_rewrite_vlan_tpid;
- __be16 l2_rewrite_vlan_tci;
- __le16 act_meter_id;
- __le16 ref_flow_handle;
- __be16 ethertype;
- __be16 outer_vlan_tci;
- __be16 dmac[3];
- __be16 inner_vlan_tci;
- __be16 smac[3];
- u8 ip_dst_mask_len;
- u8 ip_src_mask_len;
- __be32 ip_dst[4];
- __be32 ip_src[4];
- __be16 l4_src_port;
- __be16 l4_src_port_mask;
- __be16 l4_dst_port;
- __be16 l4_dst_port_mask;
- __be32 nat_ip_address[4];
- __be16 l2_rewrite_dmac[3];
- __be16 nat_port;
- __be16 l2_rewrite_smac[3];
- u8 ip_proto;
- u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
-};
-
-/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
-struct hwrm_cfa_flow_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 flow_handle;
- u8 unused_0[2];
- __le32 flow_id;
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
- #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
- __le64 ext_flow_handle;
- __le32 flow_counter_id;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
-struct hwrm_cfa_flow_alloc_cmd_err {
- u8 code;
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
- #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
- u8 unused_0[7];
-};
-
-/* hwrm_cfa_flow_free_input (size:256b/32B) */
-struct hwrm_cfa_flow_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- __le16 unused_0;
- __le32 flow_counter_id;
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_free_output (size:256b/32B) */
-struct hwrm_cfa_flow_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet;
- __le64 byte;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_info_input (size:256b/32B) */
-struct hwrm_cfa_flow_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
- u8 unused_0[6];
- __le64 ext_flow_handle;
-};
-
-/* hwrm_cfa_flow_info_output (size:5632b/704B) */
-struct hwrm_cfa_flow_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
- u8 profile;
- __le16 src_fid;
- __le16 dst_fid;
- __le16 l2_ctxt_id;
- __le64 em_info;
- __le64 tcam_info;
- __le64 vfp_tcam_info;
- __le16 ar_id;
- __le16 flow_handle;
- __le32 tunnel_handle;
- __le16 flow_timer;
- u8 unused_0[6];
- __le32 flow_key_data[130];
- __le32 flow_action_info[30];
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_flow_stats_input (size:640b/80B) */
-struct hwrm_cfa_flow_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 num_flows;
- __le16 flow_handle_0;
- __le16 flow_handle_1;
- __le16 flow_handle_2;
- __le16 flow_handle_3;
- __le16 flow_handle_4;
- __le16 flow_handle_5;
- __le16 flow_handle_6;
- __le16 flow_handle_7;
- __le16 flow_handle_8;
- __le16 flow_handle_9;
- u8 unused_0[2];
- __le32 flow_id_0;
- __le32 flow_id_1;
- __le32 flow_id_2;
- __le32 flow_id_3;
- __le32 flow_id_4;
- __le32 flow_id_5;
- __le32 flow_id_6;
- __le32 flow_id_7;
- __le32 flow_id_8;
- __le32 flow_id_9;
-};
-
-/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
-struct hwrm_cfa_flow_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 packet_0;
- __le64 packet_1;
- __le64 packet_2;
- __le64 packet_3;
- __le64 packet_4;
- __le64 packet_5;
- __le64 packet_6;
- __le64 packet_7;
- __le64 packet_8;
- __le64 packet_9;
- __le64 byte_0;
- __le64 byte_1;
- __le64 byte_2;
- __le64 byte_3;
- __le64 byte_4;
- __le64 byte_5;
- __le64 byte_6;
- __le64 byte_7;
- __le64 byte_8;
- __le64 byte_9;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
- char vfr_name[32];
-};
-
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 rx_cfa_code;
- __le16 tx_cfa_action;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_cfa_vfr_free_input (size:448b/56B) */
-struct hwrm_cfa_vfr_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- char vfr_name[32];
- __le16 vf_id;
- __le16 reserved;
- u8 unused_0[4];
-};
-
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
-struct hwrm_cfa_eem_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
- __le32 unused_0;
- __le32 supported;
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
- #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
- __le32 max_entries_supported;
- __le16 key_entry_size;
- __le16 record_entry_size;
- __le16 efc_entry_size;
- __le16 fid_entry_size;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
-struct hwrm_cfa_eem_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
- __le16 group_id;
- __le16 unused_0;
- __le32 num_entries;
- __le32 unused_1;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- __le16 unused_2;
- __le32 unused_3;
-};
-
-/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
-struct hwrm_cfa_eem_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
- __le32 unused_0;
-};
-
-/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
-struct hwrm_cfa_eem_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
- #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 num_entries;
- __le16 key0_ctx_id;
- __le16 key1_ctx_id;
- __le16 record_ctx_id;
- __le16 efc_ctx_id;
- __le16 fid_ctx_id;
- u8 unused_2[5];
- u8 valid;
-};
-
-/* hwrm_cfa_eem_op_input (size:192b/24B) */
-struct hwrm_cfa_eem_op_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
- __le16 unused_0;
- __le16 op;
- #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
- #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
- #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
- #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
- #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
-};
-
-/* hwrm_cfa_eem_op_output (size:128b/16B) */
-struct hwrm_cfa_eem_op_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 unused_0[4];
-};
-
-/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
- u8 unused_0[7];
-};
-
-/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- __be16 tunnel_dst_port_val;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
- u8 unused_0;
- __be16 tunnel_dst_port_val;
- u8 unused_1[4];
-};
-
-/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 tunnel_dst_port_id;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
- u8 unused_0;
- __le16 tunnel_dst_port_id;
- u8 unused_1[4];
-};
-
-/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_1[7];
- u8 valid;
-};
-
-/* ctx_hw_stats (size:1280b/160B) */
-struct ctx_hw_stats {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 tpa_pkts;
- __le64 tpa_bytes;
- __le64 tpa_events;
- __le64 tpa_aborts;
-};
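
The DMA'd counter block above is little-endian, so each field needs conversion before arithmetic. A trivial sketch (helper name illustrative):

static u64 ctx_total_rx_pkts(const struct ctx_hw_stats *hw)
{
	return le64_to_cpu(hw->rx_ucast_pkts) +
	       le64_to_cpu(hw->rx_mcast_pkts) +
	       le64_to_cpu(hw->rx_bcast_pkts);
}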
-
-/* ctx_hw_stats_ext (size:1408b/176B) */
-struct ctx_hw_stats_ext {
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
-};
-
-/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
-struct hwrm_stat_ctx_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 stats_dma_addr;
- __le32 update_period_ms;
- u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0;
- __le16 stats_dma_length;
-};
-
-/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
-struct hwrm_stat_ctx_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_free_input (size:192b/24B) */
-struct hwrm_stat_ctx_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_free_output (size:128b/16B) */
-struct hwrm_stat_ctx_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 stat_ctx_id;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ctx_query_output (size:1408b/176B) */
-struct hwrm_stat_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_error_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 rx_agg_pkts;
- __le64 rx_agg_bytes;
- __le64 rx_agg_events;
- __le64 rx_agg_aborts;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ext_ctx_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 flags;
- #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
- u8 unused_0[3];
-};
-
-/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
-struct hwrm_stat_ext_ctx_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 rx_ucast_pkts;
- __le64 rx_mcast_pkts;
- __le64 rx_bcast_pkts;
- __le64 rx_discard_pkts;
- __le64 rx_error_pkts;
- __le64 rx_ucast_bytes;
- __le64 rx_mcast_bytes;
- __le64 rx_bcast_bytes;
- __le64 tx_ucast_pkts;
- __le64 tx_mcast_pkts;
- __le64 tx_bcast_pkts;
- __le64 tx_error_pkts;
- __le64 tx_discard_pkts;
- __le64 tx_ucast_bytes;
- __le64 tx_mcast_bytes;
- __le64 tx_bcast_bytes;
- __le64 rx_tpa_eligible_pkt;
- __le64 rx_tpa_eligible_bytes;
- __le64 rx_tpa_pkt;
- __le64 rx_tpa_bytes;
- __le64 rx_tpa_errors;
- __le64 rx_tpa_events;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
-struct hwrm_stat_ctx_clr_stats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 stat_ctx_id;
- u8 unused_0[4];
-};
-
-/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
-struct hwrm_stat_ctx_clr_stats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_pcie_qstats_input (size:256b/32B) */
-struct hwrm_pcie_qstats_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 pcie_stat_size;
- u8 unused_0[6];
- __le64 pcie_stat_host_addr;
-};
-
-/* hwrm_pcie_qstats_output (size:128b/16B) */
-struct hwrm_pcie_qstats_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 pcie_stat_size;
- u8 unused_0[5];
- u8 valid;
-};
-
-/* pcie_ctx_hw_stats (size:768b/96B) */
-struct pcie_ctx_hw_stats {
- __le64 pcie_pl_signal_integrity;
- __le64 pcie_dl_signal_integrity;
- __le64 pcie_tl_signal_integrity;
- __le64 pcie_link_integrity;
- __le64 pcie_tx_traffic_rate;
- __le64 pcie_rx_traffic_rate;
- __le64 pcie_tx_dllp_statistics;
- __le64 pcie_rx_dllp_statistics;
- __le64 pcie_equalization_time;
- __le32 pcie_ltssm_histogram[4];
- __le64 pcie_recovery_histogram;
-};
-
-/* hwrm_fw_reset_input (size:192b/24B) */
-struct hwrm_fw_reset_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
- u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 host_idx;
- u8 flags;
- #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
- #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
- u8 unused_0[4];
-};
-
-/* hwrm_fw_reset_output (size:128b/16B) */
-struct hwrm_fw_reset_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_qstatus_input (size:192b/24B) */
-struct hwrm_fw_qstatus_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
- u8 unused_0[7];
-};
-
-/* hwrm_fw_qstatus_output (size:128b/16B) */
-struct hwrm_fw_qstatus_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 nvm_option_action_status;
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
- #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_fw_set_time_input (size:256b/32B) */
-struct hwrm_fw_set_time_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 year;
- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
- #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
- u8 month;
- u8 day;
- u8 hour;
- u8 minute;
- u8 second;
- u8 unused_0;
- __le16 millisecond;
- __le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
- #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
- u8 unused_1[4];
-};
-
-/* hwrm_fw_set_time_output (size:128b/16B) */
-struct hwrm_fw_set_time_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_struct_hdr (size:128b/16B) */
-struct hwrm_struct_hdr {
- __le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
- __le16 len;
- u8 version;
- u8 count;
- __le16 subtype;
- __le16 next_offset;
- #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
- u8 unused_0[6];
-};
-
-/* hwrm_struct_data_dcbx_app (size:64b/8B) */
-struct hwrm_struct_data_dcbx_app {
- __be16 protocol_id;
- u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
- #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
- u8 priority;
- u8 valid;
- u8 unused_0[3];
-};
-
-/* hwrm_fw_set_structured_data_input (size:256b/32B) */
-struct hwrm_fw_set_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- u8 hdr_cnt;
- u8 unused_0[5];
-};
-
-/* hwrm_fw_set_structured_data_output (size:128b/16B) */
-struct hwrm_fw_set_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_set_structured_data_cmd_err {
- u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_fw_get_structured_data_input (size:256b/32B) */
-struct hwrm_fw_get_structured_data_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 structure_id;
- __le16 subtype;
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
- u8 count;
- u8 unused_0;
-};
-
-/* hwrm_fw_get_structured_data_output (size:128b/16B) */
-struct hwrm_fw_get_structured_data_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 hdr_cnt;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
-struct hwrm_fw_get_structured_data_cmd_err {
- u8 code;
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
- u8 unused_0[7];
-};
-
-/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
-struct hwrm_exec_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_exec_fwd_resp_output (size:128b/16B) */
-struct hwrm_exec_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
-struct hwrm_reject_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 encap_request[26];
- __le16 encap_resp_target_id;
- u8 unused_0[6];
-};
-
-/* hwrm_reject_fwd_resp_output (size:128b/16B) */
-struct hwrm_reject_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_resp_input (size:1024b/128B) */
-struct hwrm_fwd_resp_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_resp_target_id;
- __le16 encap_resp_cmpl_ring;
- __le16 encap_resp_len;
- u8 unused_0;
- u8 unused_1;
- __le64 encap_resp_addr;
- __le32 encap_resp[24];
-};
-
-/* hwrm_fwd_resp_output (size:128b/16B) */
-struct hwrm_fwd_resp_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
-struct hwrm_fwd_async_event_cmpl_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 encap_async_event_target_id;
- u8 unused_0[6];
- __le32 encap_async_event_cmpl[4];
-};
-
-/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
-struct hwrm_fwd_async_event_cmpl_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_temp_monitor_query_input (size:128b/16B) */
-struct hwrm_temp_monitor_query_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_temp_monitor_query_output (size:128b/16B) */
-struct hwrm_temp_monitor_query_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 temp;
- u8 phy_temp;
- u8 om_temp;
- u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
- u8 temp2;
- u8 phy_temp2;
- u8 om_temp2;
- u8 valid;
-};
-
-/* hwrm_wol_filter_alloc_input (size:512b/64B) */
-struct hwrm_wol_filter_alloc_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- __le32 enables;
- #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
- #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
- __le16 port_id;
- u8 wol_type;
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
- u8 unused_0[5];
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_buf_size;
- __le16 pattern_mask_size;
- u8 unused_1[4];
- __le64 pattern_buf_addr;
- __le64 pattern_mask_addr;
-};
-
-/* hwrm_wol_filter_alloc_output (size:128b/16B) */
-struct hwrm_wol_filter_alloc_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 unused_0[6];
- u8 valid;
-};
-
-/* hwrm_wol_filter_free_input (size:256b/32B) */
-struct hwrm_wol_filter_free_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
- __le32 enables;
- #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
- __le16 port_id;
- u8 wol_filter_id;
- u8 unused_0[5];
-};
-
-/* hwrm_wol_filter_free_output (size:128b/16B) */
-struct hwrm_wol_filter_free_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
-struct hwrm_wol_filter_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- __le16 handle;
- u8 unused_0[4];
- __le64 pattern_buf_addr;
- __le16 pattern_buf_size;
- u8 unused_1[6];
- __le64 pattern_mask_addr;
- __le16 pattern_mask_size;
- u8 unused_2[6];
-};
-
-/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
-struct hwrm_wol_filter_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 next_handle;
- u8 wol_filter_id;
- u8 wol_type;
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
- #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
- __le32 unused_0;
- u8 mac_address[6];
- __le16 pattern_offset;
- __le16 pattern_size;
- __le16 pattern_mask_size;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
-struct hwrm_wol_reason_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 port_id;
- u8 unused_0[6];
- __le64 wol_pkt_buf_addr;
- __le16 wol_pkt_buf_size;
- u8 unused_1[6];
-};
-
-/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
-struct hwrm_wol_reason_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 wol_filter_id;
- u8 wol_reason;
- #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
- #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
- u8 wol_pkt_len;
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_dbg_read_direct_input (size:256b/32B) */
-struct hwrm_dbg_read_direct_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 read_addr;
- __le32 read_len32;
-};
-
-/* hwrm_dbg_read_direct_output (size:128b/16B) */
-struct hwrm_dbg_read_direct_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 crc32;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcaps_input (size:192b/24B) */
-struct hwrm_dbg_qcaps_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- u8 unused_0[6];
-};
-
-/* hwrm_dbg_qcaps_output (size:192b/24B) */
-struct hwrm_dbg_qcaps_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_component_disable_caps;
- #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
- __le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_qcfg_input (size:192b/24B) */
-struct hwrm_dbg_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 fid;
- __le16 flags;
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
- #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
- __le32 coredump_component_disable_flags;
- #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
-};
-
-/* hwrm_dbg_qcfg_output (size:256b/32B) */
-struct hwrm_dbg_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 fid;
- u8 unused_0[2];
- __le32 coredump_size;
- __le32 flags;
- #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
- #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
- #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
- #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
- #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
- __le16 async_cmpl_ring;
- u8 unused_2[2];
- __le32 crashdump_size;
- u8 unused_3[3];
- u8 valid;
-};
-
-/* coredump_segment_record (size:128b/16B) */
-struct coredump_segment_record {
- __le16 component_id;
- __le16 segment_id;
- __le16 max_instances;
- u8 version_hi;
- u8 version_low;
- u8 seg_flags;
- u8 compress_flags;
- #define SFLAG_COMPRESSED_ZLIB 0x1UL
- u8 unused_0[2];
- __le32 segment_len;
-};
-
-/* hwrm_dbg_coredump_list_input (size:256b/32B) */
-struct hwrm_dbg_coredump_list_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le16 seq_no;
- u8 flags;
- #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
- u8 unused_0[1];
-};
-
-/* hwrm_dbg_coredump_list_output (size:128b/16B) */
-struct hwrm_dbg_coredump_list_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 total_segments;
- __le16 data_len;
- u8 unused_1;
- u8 valid;
-};
-
-/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
-struct hwrm_dbg_coredump_initiate_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_0;
- u8 seg_flags;
- u8 unused_1[7];
-};
-
-/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
-struct hwrm_dbg_coredump_initiate_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* coredump_data_hdr (size:128b/16B) */
-struct coredump_data_hdr {
- __le32 address;
- __le32 flags_length;
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
- #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
- __le32 instance;
- __le32 next_offset;
-};
-
-/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
-struct hwrm_dbg_coredump_retrieve_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
- __le32 unused_0;
- __le16 component_id;
- __le16 segment_id;
- __le16 instance;
- __le16 unused_1;
- u8 seg_flags;
- u8 unused_2;
- __le16 unused_3;
- __le32 unused_4;
- __le32 seq_no;
- __le32 unused_5;
-};
-
-/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
-struct hwrm_dbg_coredump_retrieve_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
- u8 unused_0;
- __le16 data_len;
- u8 unused_1[3];
- u8 valid;
-};
-
-/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 ring_type;
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
- #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
- u8 unused_0[3];
- __le32 fw_ring_id;
-};
-
-/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
-struct hwrm_dbg_ring_info_get_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 producer_index;
- __le32 consumer_index;
- __le32 cag_vector_ctrl;
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_nvm_read_input (size:320b/40B) */
-struct hwrm_nvm_read_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
- u8 unused_0[2];
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_read_output (size:128b/16B) */
-struct hwrm_nvm_read_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
-struct hwrm_nvm_get_dir_entries_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
-};
-
-/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
-struct hwrm_nvm_get_dir_entries_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dir_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
-struct hwrm_nvm_get_dir_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 entries;
- __le32 entry_length;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_write_input (size:448b/56B) */
-struct hwrm_nvm_write_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 dir_data_length;
- __le16 option;
- __le16 flags;
- #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
- #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
- __le32 dir_item_length;
- __le32 offset;
- __le32 len;
- __le32 unused_0;
-};
-
-/* hwrm_nvm_write_output (size:128b/16B) */
-struct hwrm_nvm_write_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le16 dir_idx;
- u8 unused_0;
- u8 valid;
-};
-
-/* hwrm_nvm_write_cmd_err (size:64b/8B) */
-struct hwrm_nvm_write_cmd_err {
- u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_modify_input (size:320b/40B) */
-struct hwrm_nvm_modify_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le16 dir_idx;
- __le16 flags;
- #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
- #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
- __le32 offset;
- __le32 len;
- u8 unused_1[4];
-};
-
-/* hwrm_nvm_modify_output (size:128b/16B) */
-struct hwrm_nvm_modify_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
- __le16 dir_idx;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 opt_ordinal;
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
- u8 unused_0[3];
-};
-
-/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 dir_item_length;
- __le32 dir_data_length;
- __le32 fw_ver;
- __le16 dir_ordinal;
- __le16 dir_idx;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
-struct hwrm_nvm_erase_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_idx;
- u8 unused_0[6];
-};
-
-/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_erase_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dev_info_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
-struct hwrm_nvm_get_dev_info_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 manufacturer_id;
- __le16 device_id;
- __le32 sector_size;
- __le32 nvram_size;
- __le32 reserved_size;
- __le32 available_size;
- u8 nvm_cfg_ver_maj;
- u8 nvm_cfg_ver_min;
- u8 nvm_cfg_ver_upd;
- u8 flags;
- #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
- char pkg_name[16];
- __le16 hwrm_fw_major;
- __le16 hwrm_fw_minor;
- __le16 hwrm_fw_build;
- __le16 hwrm_fw_patch;
- __le16 mgmt_fw_major;
- __le16 mgmt_fw_minor;
- __le16 mgmt_fw_build;
- __le16 mgmt_fw_patch;
- __le16 roce_fw_major;
- __le16 roce_fw_minor;
- __le16 roce_fw_build;
- __le16 roce_fw_patch;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_mod_dir_entry_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 enables;
- #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
- __le16 dir_idx;
- __le16 dir_ordinal;
- __le16 dir_ext;
- __le16 dir_attr;
- __le32 checksum;
-};
-
-/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_mod_dir_entry_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_verify_update_input (size:192b/24B) */
-struct hwrm_nvm_verify_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le16 dir_type;
- __le16 dir_ordinal;
- __le16 dir_ext;
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_verify_update_output (size:128b/16B) */
-struct hwrm_nvm_verify_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_input (size:192b/24B) */
-struct hwrm_nvm_install_update_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 install_type;
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
- __le16 flags;
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
- #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
- u8 unused_0[2];
-};
-
-/* hwrm_nvm_install_update_output (size:192b/24B) */
-struct hwrm_nvm_install_update_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le64 installed_items;
- u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
- u8 problem_item;
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
- #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
- u8 reset_required;
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
- #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
- u8 unused_0[4];
- u8 valid;
-};
-
-/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
-struct hwrm_nvm_install_update_cmd_err {
- u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_get_variable_input (size:320b/40B) */
-struct hwrm_nvm_get_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 dest_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_get_variable_output (size:128b/16B) */
-struct hwrm_nvm_get_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le16 data_len;
- __le16 option_num;
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
- u8 unused_0[3];
- u8 valid;
-};
-
-/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_get_variable_cmd_err {
- u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
- u8 unused_0[7];
-};
-
-/* hwrm_nvm_set_variable_input (size:320b/40B) */
-struct hwrm_nvm_set_variable_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 src_data_addr;
- __le16 data_len;
- __le16 option_num;
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
- #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
- __le16 dimensions;
- __le16 index_0;
- __le16 index_1;
- __le16 index_2;
- __le16 index_3;
- u8 flags;
- #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
- #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
- #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
- #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
- u8 unused_0;
-};
-
-/* hwrm_nvm_set_variable_output (size:128b/16B) */
-struct hwrm_nvm_set_variable_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_set_variable_cmd_err {
- u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_qlist_input (size:128b/16B) */
-struct hwrm_selftest_qlist_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_qlist_output (size:2240b/280B) */
-struct hwrm_selftest_qlist_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 num_tests;
- u8 available_tests;
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 offline_tests;
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0;
- __le16 test_timeout;
- u8 unused_1[2];
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
- u8 eyescope_target_BER_support;
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
- #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
- u8 unused_2[6];
- u8 valid;
-};
-
-/* hwrm_selftest_exec_input (size:192b/24B) */
-struct hwrm_selftest_exec_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- u8 flags;
- #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[7];
-};
-
-/* hwrm_selftest_exec_output (size:128b/16B) */
-struct hwrm_selftest_exec_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 requested_tests;
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
- u8 test_success;
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
- u8 unused_0[5];
- u8 valid;
-};
-
-/* hwrm_selftest_irq_input (size:128b/16B) */
-struct hwrm_selftest_irq_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
-};
-
-/* hwrm_selftest_irq_output (size:128b/16B) */
-struct hwrm_selftest_irq_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 unused_0[7];
- u8 valid;
-};
-
-/* db_push_info (size:64b/8B) */
-struct db_push_info {
- u32 push_size_push_index;
- #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
- #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
- #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
- #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
- u32 reserved32;
-};
-
-/* fw_status_reg (size:32b/4B) */
-struct fw_status_reg {
- u32 fw_status;
- #define FW_STATUS_REG_CODE_MASK 0xffffUL
- #define FW_STATUS_REG_CODE_SFT 0
- #define FW_STATUS_REG_CODE_READY 0x8000UL
- #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
- #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
- #define FW_STATUS_REG_RECOVERABLE 0x20000UL
- #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
- #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
- #define FW_STATUS_REG_SHUTDOWN 0x100000UL
- #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
- #define FW_STATUS_REG_RECOVERING 0x400000UL
-};
-
-/* hcomm_status (size:64b/8B) */
-struct hcomm_status {
- u32 sig_ver;
- #define HCOMM_STATUS_VER_MASK 0xffUL
- #define HCOMM_STATUS_VER_SFT 0
- #define HCOMM_STATUS_VER_LATEST 0x1UL
- #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
- #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
- #define HCOMM_STATUS_SIGNATURE_SFT 8
- #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
- #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
- u32 fw_status_loc;
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
- #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
- #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
- #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
- #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
-};
-#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
-
-#endif /* _BNXT_HSI_H_ */
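Everything removed above follows one wire convention: each request begins with the same 16-byte header (req_type, cmpl_ring, seq_id, target_id, resp_addr) and each DMA'ed response ends in a `valid` byte that firmware writes last, which is what the driver polls before trusting the payload. A minimal sketch of issuing one of these messages through the hwrm_req_* API used throughout this patch (hypothetical function name, error handling abridged; assumes the HWRM_FW_SET_TIME request-type constant from the full header):

    /* Sketch: push wall-clock time to the NIC using the
     * hwrm_fw_set_time_input layout above. hwrm_req_send() consumes
     * the request unless it was held with hwrm_req_hold().
     */
    static int example_fw_set_time(struct bnxt *bp, struct tm *tm, u16 ms)
    {
    	struct hwrm_fw_set_time_input *req;
    	int rc;

    	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
    	if (rc)
    		return rc;
    	req->year = cpu_to_le16(tm->tm_year + 1900);
    	req->month = tm->tm_mon + 1;
    	req->day = tm->tm_mday;
    	req->hour = tm->tm_hour;
    	req->minute = tm->tm_min;
    	req->second = tm->tm_sec;
    	req->millisecond = cpu_to_le16(ms);
    	req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
    	return hwrm_req_send(bp, req);
    }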
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
new file mode 100644
index 000000000000..de3427c6c6aa
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -0,0 +1,241 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/pci.h>
+#include <linux/bnxt/hsi.h>
+
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_hwmon.h"
+
+void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+ u32 attr;
+
+ if (!bp->hwmon_dev)
+ return;
+
+ switch (bp->thermal_threshold_type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ attr = hwmon_temp_max_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ attr = hwmon_temp_crit_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ attr = hwmon_temp_emergency_alarm;
+ break;
+ default:
+ return;
+ }
+
+ hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0);
+}
+
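+/* Dual-purpose query: with a non-NULL @temp the current chip temperature
+ * is returned; with a NULL @temp the optional warn/critical/fatal/shutdown
+ * thresholds are probed and cached in @bp instead.
+ */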
+static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp)
+{
+ struct hwrm_temp_monitor_query_output *resp;
+ struct hwrm_temp_monitor_query_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto drop_req;
+
+ if (temp) {
+ *temp = resp->temp;
+ } else if (resp->flags &
+ TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) {
+ bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED;
+ bp->warn_thresh_temp = resp->warn_threshold;
+ bp->crit_thresh_temp = resp->critical_threshold;
+ bp->fatal_thresh_temp = resp->fatal_threshold;
+ bp->shutdown_thresh_temp = resp->shutdown_threshold;
+ }
+drop_req:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct bnxt *bp = _data;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_emergency:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED))
+ return 0;
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
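+/* hwmon reports temperatures in millidegrees Celsius while firmware
+ * reports whole degrees, hence the "* 1000" scaling below.
+ */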
+static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp = 0;
+ int rc;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp * 1000;
+ return rc;
+ case hwmon_temp_max:
+ *val = bp->warn_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_crit:
+ *val = bp->crit_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_emergency:
+ *val = bp->fatal_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_max_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->warn_thresh_temp;
+ return rc;
+ case hwmon_temp_crit_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->crit_thresh_temp;
+ return rc;
+ case hwmon_temp_emergency_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->fatal_thresh_temp;
+ return rc;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *bnxt_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops bnxt_hwmon_ops = {
+ .is_visible = bnxt_hwmon_is_visible,
+ .read = bnxt_hwmon_read,
+};
+
+static const struct hwmon_chip_info bnxt_hwmon_chip_info = {
+ .ops = &bnxt_hwmon_ops,
+ .info = bnxt_hwmon_info,
+};
+
+static ssize_t temp1_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000);
+}
+
+static ssize_t temp1_shutdown_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp;
+ int rc;
+
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (rc)
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp);
+}
+
+static DEVICE_ATTR_RO(temp1_shutdown);
+static DEVICE_ATTR_RO(temp1_shutdown_alarm);
+
+static struct attribute *bnxt_temp_extra_attrs[] = {
+ &dev_attr_temp1_shutdown.attr,
+ &dev_attr_temp1_shutdown_alarm.attr,
+ NULL,
+};
+
+static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ /* Shutdown temperature setting in NVM is optional */
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) ||
+ !bp->shutdown_thresh_temp)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group bnxt_temp_extra_group = {
+ .attrs = bnxt_temp_extra_attrs,
+ .is_visible = bnxt_temp_extra_attrs_visible,
+};
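+/* Expands to the NULL-terminated bnxt_temp_extra_groups[] array that
+ * bnxt_hwmon_init() passes to hwmon_device_register_with_info().
+ */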
+__ATTRIBUTE_GROUPS(bnxt_temp_extra);
+
+void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+void bnxt_hwmon_init(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+ /* temp1_xxx is the only sensor; don't register it if the query will fail */
+ rc = bnxt_hwrm_temp_query(bp, NULL);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_uninit(bp);
+ return;
+ }
+
+ if (bp->hwmon_dev)
+ return;
+
+ bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ &bnxt_hwmon_chip_info,
+ bnxt_temp_extra_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
new file mode 100644
index 000000000000..de54a562e06a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
@@ -0,0 +1,30 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HWMON_H
+#define BNXT_HWMON_H
+
+#ifdef CONFIG_BNXT_HWMON
+void bnxt_hwmon_notify_event(struct bnxt *bp);
+void bnxt_hwmon_uninit(struct bnxt *bp);
+void bnxt_hwmon_init(struct bnxt *bp);
+#else
+static inline void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_init(struct bnxt *bp)
+{
+}
+#endif
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index bb7327b82d0b..5ce190f50120 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -20,8 +20,8 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err)
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
default:
return -EIO;
}
@@ -416,6 +418,44 @@ hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
+static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNXT_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define hwrm_err(bp, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
+ netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
+ else \
+ netdev_err((bp)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
+{
+ if (req_type == HWRM_VER_GET)
+ return false;
+
+ if (!bp->fw_health || !bp->fw_health->status_reliable)
+ return false;
+
+ *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
+}
+
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
@@ -427,8 +467,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
unsigned int i, timeout, tmo_count;
u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len;
+ u32 req_type, sts;
int rc = -EBUSY;
- u32 req_type;
u16 len = 0;
u8 *valid;
@@ -436,11 +476,17 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
+ netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
+ req_type);
goto exit;
+ }
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
+ netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
+ req_type);
rc = -E2BIG;
goto exit;
}
@@ -490,13 +536,15 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ hwrm_req_dbg(bp, ctx->req);
+
if (!pci_is_enabled(bp->pdev)) {
rc = -ENODEV;
goto exit;
}
/* Limit timeout to an upper limit */
- timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
+ timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -523,17 +571,19 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- break;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
+ req_type, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(ctx->req->req_type));
+ hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
+ req_type);
goto exit;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
@@ -565,7 +615,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (resp_seq != seen_out_of_seq) {
netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
le16_to_cpu(resp_seq),
- le16_to_cpu(ctx->req->req_type),
+ req_type,
le16_to_cpu(ctx->req->seq_id));
seen_out_of_seq = resp_seq;
}
@@ -576,40 +626,45 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- goto timeout_abort;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
+ req_type,
+ le16_to_cpu(ctx->req->seq_id),
+ len, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (i >= tmo_count) {
-timeout_abort:
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
goto exit;
}
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len,
- *valid);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
}
@@ -620,11 +675,12 @@ timeout_abort:
*/
*valid = 0;
rc = le16_to_cpu(ctx->resp->error_code);
- if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
- netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- le16_to_cpu(ctx->resp->req_type),
- le16_to_cpu(ctx->resp->seq_id), rc);
- }
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
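One detail of the valid-bit change above: the loop now counts elapsed microseconds in j instead of iterations, busy-waiting 1 us at a time for the first 10 us and then backing off to 20-30 us sleeps against the enlarged HWRM_VALID_BIT_DELAY_USEC budget. A standalone sketch of the same two-phase poll (a simplification of the loop above, not a drop-in replacement):

    /* Poll a DMA-written flag byte: short busy-wait first for the common
     * fast-completion case, then sleeping waits; 'elapsed' approximates
     * microseconds spent so it can be compared against the budget.
     */
    static bool poll_valid(const volatile u8 *valid, unsigned int budget_us)
    {
    	unsigned int elapsed = 0;

    	while (elapsed < budget_us) {
    		dma_rmb();	/* read the flag after fresh DMA contents */
    		if (*valid)
    			return true;
    		if (elapsed < 10) {
    			udelay(1);
    			elapsed++;
    		} else {
    			usleep_range(20, 30);
    			elapsed += 20;
    		}
    	}
    	return false;
    }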
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 4d17f0d5363b..791b3a0cdb83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -10,7 +10,7 @@
#ifndef BNXT_HWRM_H
#define BNXT_HWRM_H
-#include "bnxt_hsi.h"
+#include <linux/bnxt/hsi.h>
enum bnxt_hwrm_ctx_flags {
/* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */
@@ -58,11 +58,10 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
-#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
#define BNXT_HWRM_TARGET 0xffff
#define BNXT_HWRM_NO_CMPL_RING -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
@@ -83,10 +82,6 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
-#define HWRM_WAIT_MUST_ABORT(bp, ctx) \
- (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \
- !bnxt_is_fw_healthy(bp))
-
static inline unsigned int hwrm_total_timeout(unsigned int n)
{
return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
@@ -95,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
@@ -142,4 +137,18 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req);
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+
+/* Older devices can only support a request length of 128 bytes.
+ * HWRM_FUNC_CFG requests that don't need fields starting at
+ * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG.
+ */
+static inline int
+bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp,
+ struct hwrm_func_cfg_input **req)
+{
+ u32 req_len;
+
+ req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len);
+ return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len);
+}
#endif
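Since the helper above only caps the request length, callers use it exactly like hwrm_req_init(). A hedged caller sketch (hypothetical function name and fid; only fields below the num_quic_tx_key_ctxs cutoff may be set):

    /* Sketch: configure a function with the short request so firmware
     * limited to 128-byte requests doesn't reject the message with -E2BIG.
     */
    static int example_func_cfg(struct bnxt *bp, u16 fid, __le32 enables)
    {
    	struct hwrm_func_cfg_input *req;
    	int rc;

    	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
    	if (rc)
    		return rc;
    	req->fid = cpu_to_le16(fid);
    	req->enables = enables;
    	return hwrm_req_send(bp, req);
    }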
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f0aa480799ca..a8a74f07bb54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -11,16 +11,29 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
-#include <linux/timecounter.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
-#include "bnxt_hsi.h"
+#include <linux/clocksource.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
@@ -49,26 +62,75 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
- spin_lock_bh(&ptp->ptp_lock);
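+ /* RTC-capable devices keep the PHC in firmware, so set it there;
+ * otherwise re-initialize the local timecounter under the seqlock.
+ */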
+ if (BNXT_PTP_USE_RTC(ptp->bp))
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
/* Caller holds ptp_lock */
+static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EIO;
+
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
+ return 0;
+}
+
static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+ int rc;
- if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+ rc = __bnxt_refclk_read(bp, sts, ns);
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return rc;
+}
+
+static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u32 *low)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
return -EIO;
+ }
ptp_read_system_prets(sts);
- *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
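
The pair of functions above implements the usual hi/lo/hi read for a counter wider than one MMIO word. A freestanding sketch of the technique, with plain volatile loads standing in for the readl() accesses; the struct and names are illustrative:

#include <stdint.h>

struct split_clk {
	volatile uint32_t lo;	/* low 32 bits of the 48-bit PHC */
	volatile uint32_t hi;	/* upper bits, in a second register */
};

static uint64_t read_48bit(struct split_clk *clk)
{
	uint32_t hi_before, hi_now, lo;

	hi_before = clk->hi;
	lo = clk->lo;
	hi_now = clk->hi;
	if (hi_now != hi_before)
		lo = clk->lo;	/* low word wrapped: re-read it */
	return ((uint64_t)hi_now << 32) | lo;
}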
@@ -78,13 +140,12 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
if (!ptp)
return;
- spin_lock_bh(&ptp->ptp_lock);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
}
-static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
+ u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -97,14 +158,20 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
+ u32 tmo_us = txts_tmo * 1000;
+
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
- req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
- req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
- req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
+ if (!tmo_us)
+ tmo_us = BNXT_PTP_QTS_TIMEOUT;
+ tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
+ req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
hwrm_req_drop(bp, req);
@@ -118,38 +185,69 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns, cycles;
+ u32 low;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
- rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read_low(ptp->bp, sts, &low);
+ if (rc)
return rc;
- }
- ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ cycles = bnxt_extend_cycles_32b_to_48b(ptp, low);
+ ns = bnxt_timecounter_cyc2time(ptp, cycles);
*ts = ns_to_timespec64(ns);
return 0;
}
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+	if (rc)
+		netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+	else
+		bnxt_ptp_update_current_time(ptp->bp);
+
+ return rc;
+}
+
static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ unsigned long flags;
+
+ if (BNXT_PTP_USE_RTC(ptp->bp))
+ return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
-static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
{
- struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
- ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct hwrm_port_mac_cfg_input *req;
- struct bnxt *bp = ptp->bp;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
@@ -158,13 +256,30 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
- rc = hwrm_req_send(ptp->bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
- netdev_err(ptp->bp->dev,
- "ptp adjfreq failed. rc = %d\n", rc);
+ netdev_err(bp->dev,
+ "ptp adjfine failed. rc = %d\n", rc);
return rc;
}
+static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct bnxt *bp = ptp->bp;
+ unsigned long flags;
+
+ if (!BNXT_MH(bp))
+ return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
+ timecounter_read(&ptp->tc);
+ ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ return 0;
+}
+
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
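
bnxt_ptp_adjfine() above rescales the cached multiplier with the kernel's adjust_by_scaled_ppm(), where scaled_ppm is parts per million in 16.16 fixed point, so 1 ppm equals 65536. A rough model of that arithmetic, as a sketch rather than the kernel helper itself:

#include <stdint.h>

static uint32_t model_adjust_by_scaled_ppm(uint32_t base, long scaled_ppm)
{
	/* base * scaled_ppm / (10^6 * 2^16), added back onto base */
	int64_t delta = (int64_t)base * scaled_ppm / (1000000LL << 16);

	return base + (uint32_t)delta;
}
/* e.g. base = 1 << 23 (the BNXT_CYCLES_SHIFT multiplier range),
 * scaled_ppm = 65536 (1 ppm): result is base + 8
 */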
@@ -172,9 +287,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, pps_ts);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -244,6 +357,44 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ if (!ptp || !ptp->tstamp_filters)
+ return -EIO;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ goto out;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+ (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+ ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+ req->flags = cpu_to_le32(ptp->tstamp_filters);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
+ return 0;
+ }
+ ptp->tstamp_filters = 0;
+out:
+ bp->ptp_all_rx_tstamp = 0;
+ netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+ return rc;
+}
+
void bnxt_ptp_reapply_pps(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -275,14 +426,11 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
u64 nsec_now, nsec_delta;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ if (rc)
return rc;
- }
- nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -331,7 +479,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
- u8 pin_id;
+ int pin_id;
int rc;
switch (rq->type) {
@@ -339,6 +487,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure an External PPS IN */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
@@ -352,6 +502,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure a Periodic PPS OUT */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
@@ -380,33 +532,39 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
- int rc;
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ break;
+ }
- if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
- else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
- req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- return hwrm_req_send(bp, req);
+ ptp->tstamp_filters = flags;
+
+ return bnxt_ptp_cfg_tstamp_filters(bp);
}
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
u16 old_rxctl;
int old_rx_filter, rc;
@@ -416,24 +574,24 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (!ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.flags)
- return -EINVAL;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
old_rx_filter = ptp->rx_filter;
old_rxctl = ptp->rxctl;
old_tx_tstamp_en = ptp->tx_tstamp_en;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
+ case HWTSTAMP_FILTER_ALL:
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) {
+ ptp->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ return -EOPNOTSUPP;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -456,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
ptp->tx_tstamp_en = 1;
else
ptp->tx_tstamp_en = 0;
@@ -465,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (rc)
goto ts_set_err;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
ts_set_err:
ptp->rx_filter = old_rx_filter;
@@ -476,22 +633,22 @@ ts_set_err:
return rc;
}
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
if (!ptp)
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ stmpconf->flags = 0;
+ stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+ : HWTSTAMP_TX_OFF;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
}
static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
@@ -516,7 +673,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
int rc, i;
reg_arr = ptp->refclk_regs;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
if (rc)
return rc;
@@ -525,6 +682,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ for (i = 0; i < 2; i++) {
+ if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+ return -EINVAL;
+ ptp->refclk_mapped_regs[i] = reg_arr[i];
+ }
+ return 0;
+ }
return -ENODEV;
}
@@ -534,38 +699,51 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp)
(BNXT_PTP_GRC_WIN - 1) * 4);
}
-static u64 bnxt_cc_read(const struct cyclecounter *cc)
+static u64 bnxt_cc_read(struct cyclecounter *cc)
{
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
- bnxt_refclk_read(ptp->bp, NULL, &ns);
+ __bnxt_refclk_read(ptp->bp, NULL, &ns);
return ns;
}
-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ struct bnxt_ptp_tx_req *txts_req;
+ unsigned long now = jiffies;
u64 ts = 0, ns = 0;
+ u32 tmo = 0;
int rc;
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ txts_req = &ptp->txts_req[slot];
+ /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+ smp_rmb();
+ if (!time_after_eq(now, txts_req->abs_txts_tmo))
+ tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
+ tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+ ptp->stats.ts_pkts++;
} else {
- netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
- rc);
+ if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+ return -EAGAIN;
+
+ ptp->stats.ts_lost++;
+ netdev_warn_once(bp->dev,
+ "TS query for TX timer failed rc = %x\n", rc);
}
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- atomic_inc(&ptp->tx_avail);
+ dev_kfree_skb_any(txts_req->tx_skb);
+ txts_req->tx_skb = NULL;
+
+ return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -574,53 +752,136 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
+ u16 cons = ptp->txts_cons;
+ unsigned long flags;
+ u32 num_requests;
+ int rc = 0;
+
+ num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+ while (num_requests--) {
+ if (IS_ERR(ptp->txts_req[cons].tx_skb))
+ goto next_slot;
+ if (!ptp->txts_req[cons].tx_skb)
+ break;
+ rc = bnxt_stamp_tx_skb(bp, cons);
+ if (rc == -EAGAIN)
+ break;
+next_slot:
+ BNXT_PTP_INC_TX_AVAIL(ptp);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
- if (ptp->tx_skb)
- bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
- if (!time_after_eq(now, ptp->next_period))
+ if (!time_after_eq(now, ptp->next_period)) {
+ if (rc == -EAGAIN)
+ return 0;
return ptp->next_period - now;
+ }
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
+ if (rc == -EAGAIN)
+ return 0;
return HZ;
}
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
{
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+	/* make sure the ptp aux worker has finished; it may still be
+	 * running from when BNXT_STATE_OPEN was set
+	 */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
- if (ptp->tx_skb) {
- netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
- return -EBUSY;
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+ spin_lock_bh(&ptp->ptp_tx_lock);
+ if (ptp->tx_avail) {
+ *prod = ptp->txts_prod;
+ ptp->txts_prod = NEXT_TXTS(*prod);
+ ptp->tx_avail--;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ return 0;
}
- ptp->tx_skb = skb;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ atomic64_inc(&ptp->stats.ts_err);
+ return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
+
+ txts_req = &ptp->txts_req[prod];
+ txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+ /* make sure abs_txts_tmo is written first */
+ smp_wmb();
+ txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
- return 0;
}
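
The smp_wmb() in bnxt_get_tx_ts_p5() above pairs with the smp_rmb() in bnxt_stamp_tx_skb(): the skb pointer publishes the slot, so abs_txts_tmo must be visible first. The same pairing modeled with C11 release/acquire atomics; this is an illustration, not the kernel primitives:

#include <stdatomic.h>

struct tx_slot {
	unsigned long abs_tmo;	/* plain data, must be written first */
	_Atomic(void *) skb;	/* publication pointer for the slot */
};

static void producer(struct tx_slot *s, void *skb, unsigned long tmo)
{
	s->abs_tmo = tmo;
	/* release store: orders the abs_tmo write before the skb write,
	 * like the smp_wmb() before the tx_skb assignment above
	 */
	atomic_store_explicit(&s->skb, skb, memory_order_release);
}

static void *consumer(struct tx_slot *s, unsigned long *tmo)
{
	/* acquire load: pairs with the release store in producer() */
	void *skb = atomic_load_explicit(&s->skb, memory_order_acquire);

	if (skb)
		*tmo = s->abs_tmo;	/* now safe to read */
	return skb;
}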
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 time;
if (!ptp)
return -ENODEV;
- BNXT_READ_TIME64(ptp, time, ptp->old_time);
- *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
- if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
- *ts += BNXT_LO_TIMER_MASK + 1;
+ *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts);
return 0;
}
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp)
+{
+ struct skb_shared_hwtstamps timestamp = {};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 opaque = tscmp->tx_ts_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u64 ts, ns;
+ u16 cons;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+ cons = TX_OPAQUE_IDX(opaque);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ if (tx_buf->is_ts_pkt) {
+ if (BNXT_TX_TS_ERR(tscmp)) {
+ netdev_err(bp->dev,
+ "timestamp completion error 0x%x 0x%x\n",
+ le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+ le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+ } else {
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(tx_buf->skb, &timestamp);
+ }
+ tx_buf->is_ts_pkt = 0;
+ }
+}
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -630,7 +891,7 @@ static const struct ptp_clock_info bnxt_ptp_caps = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = bnxt_ptp_adjfreq,
+ .adjfine = bnxt_ptp_adjfine,
.adjtime = bnxt_ptp_adjtime,
.do_aux_work = bnxt_ptp_ts_aux_work,
.gettimex64 = bnxt_ptp_gettimex,
@@ -691,7 +952,6 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
snprintf(ptp_info->pin_config[i].name,
sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i);
ptp_info->pin_config[i].index = i;
- ptp_info->pin_config[i].chan = i;
if (*pin_usg == BNXT_PPS_PIN_PPS_IN)
ptp_info->pin_config[i].func = PTP_PF_EXTTS;
else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT)
@@ -708,6 +968,8 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
ptp_info->n_per_out = 1;
ptp_info->pps = 1;
ptp_info->verify = bnxt_ptp_verify;
+ ptp_info->supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS;
+ ptp_info->supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
return 0;
}
@@ -719,6 +981,82 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
}
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ if (BNXT_MH(bp)) {
+ /* Use timecounter based non-real time mode */
+ ptp->cc.shift = BNXT_CYCLES_SHIFT;
+ ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift);
+ ptp->cmult = ptp->cc.mult;
+ } else {
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ }
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc) {
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
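
A note on the cycle_last fixup above: in RTC mode the cyclecounter runs with mult == 1 and shift == 0 (the non-multihost branch of bnxt_ptp_timecounter_init()), so the timecounter should be an identity map. Assuming that setup:

	cyc2time(c) = tc.nsec + ((c - tc.cycle_last) & cc.mask)
	            = ns + ((c - (ns & cc.mask)) & cc.mask)

which collapses to the raw 48-bit PHC reading. Pinning cycle_last to the same ns snapshot used to seed tc.nsec, rather than the extra counter read timecounter_init() performs internally, keeps the map exact from the first conversion.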
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ unsigned long flags;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
+ &ns, 0, 0);
+ if (rc)
+ return rc;
+ }
+ write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
+
+ return 0;
+}
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ }
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+}
+
int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -734,23 +1072,22 @@ int bnxt_ptp_init(struct bnxt *bp)
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
- if (ptp->ptp_clock) {
- ptp_clock_unregister(ptp->ptp_clock);
- ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
- }
- atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
- spin_lock_init(&ptp->ptp_lock);
+ bnxt_ptp_free(bp);
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = bnxt_cc_read;
- ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
+ WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
+ seqlock_init(&ptp->ptp_lock);
+ spin_lock_init(&ptp->ptp_tx_lock);
- ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
- timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ if (BNXT_PTP_USE_RTC(bp)) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured);
+ if (rc)
+ goto out;
+ } else {
+ bnxt_ptp_timecounter_init(bp, true);
+ bnxt_ptp_adjfine_rtc(bp, 0);
+ }
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -762,17 +1099,26 @@ int bnxt_ptp_init(struct bnxt *bp)
int err = PTR_ERR(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- bnxt_unmap_ptp_regs(bp);
- return err;
+ rc = err;
+ goto out;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- spin_lock_bh(&ptp->ptp_lock);
+
+ ptp->stats.ts_pkts = 0;
+ ptp->stats.ts_lost = 0;
+ atomic64_set(&ptp->stats.ts_err, 0);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
+ ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
}
void bnxt_ptp_clear(struct bnxt *bp)
@@ -789,9 +1135,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- if (ptp->tx_skb) {
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- }
bnxt_unmap_ptp_regs(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fa5f05708e6d..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -10,14 +10,22 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
#define BNXT_PTP_GRC_WIN 6
#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_CYCLES_SHIFT 23
+#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_HI_TIMER_SHIFT 24
+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -28,7 +36,7 @@ struct pps_pin {
u8 state;
};
-#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
@@ -72,6 +80,22 @@ struct bnxt_pps {
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
+struct bnxt_ptp_stats {
+ u64 ts_pkts;
+ u64 ts_lost;
+ atomic64_t ts_err;
+};
+
+#define BNXT_MAX_TX_TS 4
+#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
+
+struct bnxt_ptp_tx_req {
+ struct sk_buff *tx_skb;
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ unsigned long abs_txts_tmo;
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -79,20 +103,22 @@ struct bnxt_ptp_cfg {
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
- spinlock_t ptp_lock;
- struct sk_buff *tx_skb;
+ seqlock_t ptp_lock;
+ /* serialize ts tx request queuing */
+ spinlock_t ptp_tx_lock;
u64 current_time;
- u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
- /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
- #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+ u32 cmult;
+	/* cache of the upper 24 bits of the cyclecounter; 8 bits are used to check for roll-over */
+	u32 old_time;
+	/* a cyclecounter with a 23-bit shift overflows in ~36 mins; check for overflow every 18 mins */
+ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
+
+ struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
- u16 tx_seqid;
- u16 tx_hdr_off;
struct bnxt *bp;
- atomic_t tx_avail;
-#define BNXT_MAX_TX_TS 1
+ u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -109,31 +135,67 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 rtc_configured:1;
int rx_filter;
+ u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ u16 txts_prod;
+ u16 txts_cons;
+
+ struct bnxt_ptp_stats stats;
};
-#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
+#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
- spin_lock_bh(&(ptp)->ptp_lock); \
- (dst) = (src); \
- spin_unlock_bh(&(ptp)->ptp_lock); \
+ spin_lock_bh(&(ptp)->ptp_tx_lock); \
+ (ptp)->tx_avail++; \
+ spin_unlock_bh(&(ptp)->ptp_tx_lock); \
} while (0)
-#else
-#define BNXT_READ_TIME64(ptp, dst, src) \
- ((dst) = READ_ONCE(src))
-#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
+static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ } while (read_seqretry(&ptp->ptp_lock, seq));
+
+ return ns;
+}
+
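+/* The seqlock conversion pays off in bnxt_timecounter_cyc2time() above:
+ * datapath timestamp conversions take no lock at all and simply retry via
+ * read_seqretry() if a timecounter update raced, while the register-read
+ * and FW-reset paths earlier in this patch use the read_seqlock_excl_*()
+ * variants, which take the underlying spinlock for mutual exclusion
+ * without invalidating lockless readers.
+ */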
+static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts)
+{
+ u64 time, cycles;
+
+ time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT;
+ cycles = (time & BNXT_HI_TIMER_MASK) | ts;
+ if (ts < (time & BNXT_LO_TIMER_MASK))
+ cycles += BNXT_LO_TIMER_MASK + 1;
+ return cycles;
+}
#endif
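
A worked example, with hypothetical numbers, for bnxt_extend_cycles_32b_to_48b(): the hardware delivers only the low 32 bits of the 48-bit PHC, and the helper splices in the cached upper bits from old_time, bumping by one wrap period when the low word has already rolled over:

#include <stdint.h>

#define LO_MASK 0x0000ffffffffULL	/* BNXT_LO_TIMER_MASK */
#define HI_MASK 0xffff00000000ULL	/* BNXT_HI_TIMER_MASK */

static uint64_t extend_32b_to_48b(uint64_t cached_time, uint32_t ts)
{
	uint64_t cycles = (cached_time & HI_MASK) | ts;

	/* ts below the cached low word means the counter wrapped
	 * after the snapshot: add one 2^32 wrap period
	 */
	if (ts < (uint32_t)(cached_time & LO_MASK))
		cycles += LO_MASK + 1;
	return cycles;
}
/* cached_time = 0x000100000010, ts = 0x00000008:
 * 0x8 < 0x10, so the result is 0x000200000008, one wrap later
 */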
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 1d177fed44a6..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,7 +15,8 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
-#include "bnxt_hsi.h"
+#include <net/dcbnl.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
@@ -95,7 +96,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->flags = cpu_to_le32(func_flags);
@@ -146,7 +147,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
- ivi->vlan = vf->vlan;
- if (vf->flags & BNXT_VF_QOS)
- ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
- else
- ivi->qos = 0;
+ ivi->vlan = vf->vlan & VLAN_VID_MASK;
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -232,7 +230,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
}
vf = &bp->pf.vf[vf_id];
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -256,29 +254,33 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (vlan_proto != htons(ETH_P_8021Q) &&
+ (vlan_proto != htons(ETH_P_8021AD) ||
+ !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
- /* TODO: needed to implement proper handling of user priority,
- * currently fail the command if there is valid priority
- */
- if (vlan_id > 4095 || qos)
+ if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+ (!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
- vlan_tag = vlan_id;
+ vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+ req->tpid = vlan_proto;
+ }
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
@@ -307,14 +309,14 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
}
if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
@@ -330,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return rc;
}
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input *req;
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ return 0;
+
+ vf = &bp->pf.vf[vf_id];
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+ case BNXT_VF_LINK_FORCED:
+ req->options =
+ FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+ break;
+ case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+ break;
+ default:
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ break;
+ }
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
struct bnxt *bp = netdev_priv(dev);
@@ -355,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
break;
default:
netdev_err(bp->dev, "Invalid link option\n");
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
- if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+ rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+ else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
@@ -491,7 +526,7 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
struct bnxt_vf_info *vf;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -518,6 +553,63 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
return hwrm_req_send(bp, req);
}
+static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_cfg_input *cfg_req;
+ struct hwrm_func_qcaps_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto err;
+
+ rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
+ if (rc)
+ goto err;
+
+	/* In case of VF dynamic resource allocation, the driver provisions
+	 * maximum resources to all the VFs and the FW allocates them on the
+	 * fly, so always divide the resources by 1, i.e. provision each VF
+	 * with the full pool.
+	 */
+ if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+ num_vfs = 1;
+
+ cfg_req->fid = cpu_to_le16(0xffff);
+ cfg_req->enables2 =
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
+ cfg_req->roce_max_av_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
+ cfg_req->roce_max_cq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
+ cfg_req->roce_max_mrw_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
+ cfg_req->roce_max_qp_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
+ cfg_req->roce_max_srq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
+ cfg_req->roce_max_gid_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);
+
+ rc = hwrm_req_send(bp, cfg_req);
+
+err:
+ hwrm_req_drop(bp, req);
+ if (rc)
+ netdev_err(bp->dev, "RoCE sriov configuration failed\n");
+}
+
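
For a concrete, illustrative example: with resp->roce_vf_max_qp = 8192 and num_vfs = 4, each VF is provisioned 2048 QPs; with BNXT_ROCE_VF_DYN_ALLOC_CAP set, num_vfs is forced to 1, every VF is provisioned the full 8192, and the firmware arbitrates actual usage at runtime.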
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
@@ -536,7 +628,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
vf_ring_grps = 0;
} else {
@@ -550,7 +642,6 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
- vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
@@ -566,17 +657,26 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
vf_rx_rings /= num_vfs;
- vf_vnics /= num_vfs;
+ if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
+ vf_vnics >= pf->max_vfs) {
+			/* Take into account that FW has pre-reserved one
+			 * VNIC for each of the pf->max_vfs possible VFs;
+			 * see the sketch after this hunk.
+			 */
+ vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
+ } else {
+ vf_vnics /= num_vfs;
+ }
vf_stat_ctx /= num_vfs;
vf_ring_grps /= num_vfs;
vf_rss /= num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->min_tx_rings = cpu_to_le16(vf_tx_rings);
req->min_rx_rings = cpu_to_le16(vf_rx_rings);
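
The pre-reserved VNIC branch above in numbers: each enabled VF keeps the one VNIC firmware set aside for it and takes an equal share of the leftover pool. A small sketch with illustrative values; the helper name is hypothetical:

static unsigned int vnics_per_vf(unsigned int vf_vnics,
				 unsigned int max_vfs,
				 unsigned int num_vfs)
{
	/* (vf_vnics - max_vfs) spare VNICs split num_vfs ways, plus the
	 * one VNIC firmware already reserved for each enabled VF
	 */
	return (vf_vnics - max_vfs + num_vfs) / num_vfs;
}
/* e.g. vf_vnics = 128, max_vfs = 64, num_vfs = 4:
 * (128 - 64 + 4) / 4 = 17 = 1 pre-reserved + 64 / 4 from the spare pool
 */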
@@ -594,20 +694,26 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
req->max_msix = cpu_to_le16(vf_msix / num_vfs);
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
+ vf->fw_fid = pf->first_vf_id + i;
+ rc = bnxt_set_vf_link_admin_state(bp, i);
+ if (rc)
+ break;
+
if (reset)
__bnxt_set_vf_params(bp, i);
- req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ req->vf_id = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
}
if (pf->active_vfs) {
@@ -622,8 +728,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
@@ -645,7 +751,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u32 mtu, i;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -674,7 +780,13 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ req->enables |=
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ }
+
+ mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
@@ -749,7 +861,9 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
+ if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
+ bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);
+
return 0;
}
@@ -761,7 +875,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
@@ -826,19 +940,38 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ /* Create representors for VFs in switchdev mode */
+ devl_lock(bp->dl);
+ rc = bnxt_vf_reps_create(bp);
+ devl_unlock(bp->dl);
+ if (rc) {
+		netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n");
+ goto err_out3;
+ }
+
return 0;
+err_out3:
+ /* Disable SR-IOV */
+ pci_disable_sriov(bp->pdev);
+
err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
return rc;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -846,7 +979,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -859,16 +992,24 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!pci_num_vf(bp->pdev))
+ return;
+
+ __bnxt_sriov_disable(bp);
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
-
- bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -876,23 +1017,22 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
- netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
- return 0;
- }
-
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
@@ -926,8 +1066,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1042,7 +1185,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
/* There are two cases:
* 1.If firmware spec < 0x10202,VF MAC address is not forwarded
* to the PF and so it doesn't have to match
- * 2.Allow VF to modify it's own MAC when PF has not assigned a
+ * 2.Allow VF to modify its own MAC when PF has not assigned a
* valid MAC address and firmware spec >= 0x10202
*/
mac_ok = true;
@@ -1061,7 +1204,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1072,6 +1215,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
@@ -1233,7 +1381,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
return 0;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index e6a4a768b10b..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -19,8 +19,8 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
- actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",
@@ -370,16 +370,20 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
- if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
- (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
- netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
+ if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+ (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+ netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -1312,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1406,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
@@ -1868,7 +1872,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
struct flow_cls_offload *flower = type_data;
struct bnxt *bp = priv->bp;
- if (flower->common.chain_index)
+ if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -1962,7 +1966,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
- if (!bnxt_is_netdev_indr_offload(netdev))
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
@@ -2075,6 +2079,7 @@ destroy_flow_table:
rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
kfree(tc_info);
+ bp->tc_info = NULL;
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fde0c3e8ac57..927971c362f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -19,221 +19,176 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
- struct bnxt_ulp_ops *ulp_ops, void *handle)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
+static DEFINE_IDA(bnxt_aux_dev_ids);
- ulp = &edev->ulp_tbl[ulp_id];
- if (rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
- return -EBUSY;
- }
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
+static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ int num_msix, i;
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
}
-
- atomic_set(&ulp->ref_count, 0);
- ulp->handle = handle;
- rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
- if (ulp_id == BNXT_ROCE_ULP) {
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ num_msix = edev->ulp_tbl->msix_requested;
+ for (i = 0; i < num_msix; i++) {
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ ent[i].db_offset = bp->db_offset;
+ else
+ ent[i].db_offset = i * 0x80;
}
+}
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
return 0;
}
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
- int i = 0;
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
- ulp = &edev->ulp_tbl[ulp_id];
- if (!rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
- return -EINVAL;
- }
- if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
- if (ulp->max_async_event_id)
- bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
- RCU_INIT_POINTER(ulp->ulp_ops, NULL);
- synchronize_rcu();
- ulp->max_async_event_id = 0;
- ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
return 0;
}
-static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
- struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
-
- num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
- for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- ent[i].db_offset = DB_PF_OFFSET_P5;
- if (BNXT_VF(bp))
- ent[i].db_offset = DB_VF_OFFSET_P5;
- } else {
- ent[i].db_offset = (idx + i) * 0x80;
- }
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
+
+ /* Reserve one additional stat_ctx when the device is capable
+ * of supporting port mirroring on RDMA device.
+ */
+ if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+ bp->edev->ulp_num_ctxs++;
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
- struct bnxt_msix_entry *ent, int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+ struct bnxt_ulp_ops *ulp_ops,
+ void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
+ unsigned int max_stat_ctxs;
+ struct bnxt_ulp *ulp;
int rc = 0;
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
-
- if (edev->ulp_tbl[ulp_id].msix_requested)
- return -EAGAIN;
-
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
- return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
+ netdev_lock(dev);
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
}
- edev->ulp_tbl[ulp_id].msix_base = idx;
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- return -EAGAIN;
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
}
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
-
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
-}
-
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
+ ulp = edev->ulp_tbl;
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return 0;
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
- }
- return 0;
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ netdev_unlock(dev);
+ return rc;
}
+EXPORT_SYMBOL(bnxt_register_dev);
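
Both the register and unregister paths above follow a single lock-ordering rule: the netdev instance lock is taken before the per-device ULP mutex. A minimal template of that ordering (illustrative only, not a driver entry point):

static void bnxt_ulp_op_template(struct net_device *dev,
				 struct bnxt_en_dev *edev)
{
	netdev_lock(dev);			/* instance lock first */
	mutex_lock(&edev->en_dev_lock);		/* then the ULP mutex */
	/* ... registration or teardown work ... */
	mutex_unlock(&edev->en_dev_lock);	/* release in reverse order */
	netdev_unlock(dev);
}
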
-int bnxt_get_ulp_msix_num(struct bnxt *bp)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- }
- return 0;
-}
+ ulp = edev->ulp_tbl;
+ netdev_lock(dev);
+ mutex_lock(&edev->en_dev_lock);
+ edev->ulp_tbl->msix_requested = 0;
-int bnxt_get_ulp_msix_base(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
- }
- return 0;
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ mutex_unlock(&edev->en_dev_lock);
+ netdev_unlock(dev);
}
+EXPORT_SYMBOL(bnxt_unregister_dev);
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
- return 0;
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, capped at the default limit above.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
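
The default sizing above is a clamp of the per-function RoCE MSIX limit against the CPU count. A condensed restatement, assuming the BNXT_MAX_ROCE_MSIX* limits defined in bnxt_ulp.h further down in this diff:

static int dflt_roce_msix(bool vf, bool npar)
{
	int limit = vf ? BNXT_MAX_ROCE_MSIX_VF :
		    npar ? BNXT_MAX_ROCE_MSIX_NPAR_PF : BNXT_MAX_ROCE_MSIX;

	/* one NQ vector per online CPU, plus one for the CREQ */
	return min_t(int, limit, num_online_cpus() + 1);
}
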
-static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -243,7 +198,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
u32 resp_len;
int rc;
- if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
@@ -252,7 +207,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -264,126 +219,97 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
-
-static void bnxt_ulp_get(struct bnxt_ulp *ulp)
-{
- atomic_inc(&ulp->ref_count);
-}
-
-static void bnxt_ulp_put(struct bnxt_ulp *ulp)
-{
- atomic_dec(&ulp->ref_count);
-}
+EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
+
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ const struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_stop)
- continue;
- ops->ulp_stop(ulp->handle);
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->suspend(adev, pm);
+ }
}
+ulp_stop_exit:
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
- if (!edev)
+ if (!edev || err)
return;
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
- if (err)
- return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_start)
- continue;
- ops->ulp_start(ulp->handle);
- }
-}
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ const struct auxiliary_driver *adrv;
-void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- rcu_read_lock();
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_sriov_config) {
- rcu_read_unlock();
- continue;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->resume(adev);
}
- bnxt_ulp_get(ulp);
- rcu_read_unlock();
- ops->ulp_sriov_config(ulp->handle, num_vfs);
- bnxt_ulp_put(ulp);
- }
-}
-
-void bnxt_ulp_shutdown(struct bnxt *bp)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_shutdown)
- continue;
- ops->ulp_shutdown(ulp->handle);
}
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -392,17 +318,17 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -423,80 +349,185 @@ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
u16 event_id = le16_to_cpu(cmpl->event_id);
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- int i;
+ struct bnxt_ulp *ulp;
- if (!edev)
+ if (!bnxt_ulp_registered(edev))
return;
+ ulp = edev->ulp_tbl;
rcu_read_lock();
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_async_notifier)
- continue;
- if (!ulp->async_events_bmap ||
- event_id > ulp->max_async_event_id)
- continue;
-
- /* Read max_async_event_id first before testing the bitmap. */
- smp_rmb();
- if (test_bit(event_id, ulp->async_events_bmap))
- ops->ulp_async_notifier(ulp->handle, cmpl);
- }
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
rcu_read_unlock();
}
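
The smp_rmb() above pairs with the smp_wmb() in bnxt_register_async_events() below: the bitmap pointer is published before max_async_event_id, so a reader that observes a valid event id is guaranteed to also observe the bitmap it indexes. The pairing in isolation (illustrative names, not driver API):

static unsigned long *ev_bmap;
static u16 ev_max_id;

static void ev_publish(unsigned long *bmap, u16 max_id)
{
	ev_bmap = bmap;
	smp_wmb();		/* bitmap visible before the id limit */
	ev_max_id = max_id;
}

static bool ev_test(u16 id)
{
	if (!ev_bmap || id > ev_max_id)
		return false;
	smp_rmb();		/* pairs with smp_wmb() in ev_publish() */
	return test_bit(id, ev_bmap);
}
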
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
- unsigned long *events_bmap, u16 max_id)
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
+ ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
+EXPORT_SYMBOL(bnxt_register_async_events);
-static const struct bnxt_en_ops bnxt_en_ops_tbl = {
- .bnxt_register_device = bnxt_register_dev,
- .bnxt_unregister_device = bnxt_unregister_dev,
- .bnxt_request_msix = bnxt_req_msix_vecs,
- .bnxt_free_msix = bnxt_free_msix_vecs,
- .bnxt_send_fw_msg = bnxt_send_msg,
- .bnxt_register_fw_async_events = bnxt_register_async_events,
-};
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
+{
+ struct bnxt_aux_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!bp->aux_priv)
+ return;
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+ aux_priv = bp->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_uninit(adev);
+}
+
+static void bnxt_aux_dev_release(struct device *dev)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_en_dev *edev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+ struct bnxt *bp = netdev_priv(aux_priv->edev->net);
+
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->edev->ulp_tbl);
+ bp->edev = NULL;
+ kfree(aux_priv->edev);
+ kfree(aux_priv);
+ bp->aux_priv = NULL;
+}
+
+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
+static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+{
+ edev->net = bp->dev;
+ edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
+ edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);
- edev = bp->edev;
- if (!edev) {
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return ERR_PTR(-ENOMEM);
- edev->en_ops = &bnxt_en_ops_tbl;
- edev->net = dev;
- edev->pdev = bp->pdev;
- edev->l2_db_size = bp->db_size;
- edev->l2_db_size_nc = bp->db_size;
- bp->edev = edev;
- }
- edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
- return bp->edev;
+ if (bp->flags & BNXT_FLAG_VF)
+ edev->flags |= BNXT_EN_FLAG_VF;
+ if (BNXT_ROCE_VF_RESC_CAP(bp))
+ edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
+
+ edev->chip_num = bp->chip_num;
+ edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ edev->pf_port_id = bp->pf.port_id;
+ edev->en_state = bp->state;
+ edev->bar0 = bp->bar0;
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
+}
+
+void bnxt_rdma_aux_device_init(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnxt_aux_priv *aux_priv;
+ struct bnxt_en_dev *edev;
+ struct bnxt_ulp *ulp;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!aux_priv)
+ goto exit;
+
+ aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
+ netdev_warn(bp->dev,
+ "ida alloc failed for ROCE auxiliary device\n");
+ kfree(aux_priv);
+ goto exit;
+ }
+
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
+ goto exit;
+ }
+ bp->aux_priv = aux_priv;
+
+ /* From this point, all cleanup will happen via the .release callback, and
+ * any error unwinding will need to include a call to
+ * auxiliary_device_uninit.
+ */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ goto aux_dev_uninit;
+
+ aux_priv->edev = edev;
+
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
-EXPORT_SYMBOL(bnxt_ulp_probe);
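
With bnxt_ulp_probe() gone, the RoCE driver attaches through the auxiliary bus instead, matching on the "rdma" device created above. A minimal consumer sketch under that assumption (rdma_probe, rdma_drv, and my_ulp_ops are hypothetical names; the real bnxt_re probe does considerably more):

static struct bnxt_ulp_ops my_ulp_ops;	/* hypothetical ops table */

static int rdma_probe(struct auxiliary_device *adev,
		      const struct auxiliary_device_id *id)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(adev, struct bnxt_aux_priv, aux_dev);

	/* aux_priv->edev hands the L2 device state to the ULP */
	return bnxt_register_dev(aux_priv->edev, &my_ulp_ops, aux_priv);
}

static const struct auxiliary_device_id rdma_id_table[] = {
	{ .name = "bnxt_en.rdma" },	/* KBUILD_MODNAME "." adev->name */
	{}
};

static struct auxiliary_driver rdma_drv = {
	.probe = rdma_probe,
	.id_table = rdma_id_table,
	/* .suspend/.resume are what bnxt_ulp_stop()/_start() invoke */
};
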
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6b4d2556a6df..3c5b8a53f715 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,13 +10,13 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_OTHER_ULP 1
-#define BNXT_MAX_ULP 2
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
+
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -29,11 +29,7 @@ struct bnxt_msix_entry {
struct bnxt_ulp_ops {
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
- void (*ulp_stop)(void *);
- void (*ulp_start)(void *);
- void (*ulp_sriov_config)(void *, int);
- void (*ulp_shutdown)(void *);
- void (*ulp_irq_stop)(void *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -51,22 +47,25 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
- atomic_t ref_count;
};
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
- #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
- const struct bnxt_en_ops *en_ops;
- struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+ #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
+
+ struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -75,38 +74,55 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
-};
+ int l2_db_offset; /* Doorbell offset in
+ * bytes within
+ * l2_db_size_nc.
+ */
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Checked by the RoCE
+ * driver in suspend
+ * mode only; updated
+ * on resume.
+ */
+ void __iomem *bar0;
-struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, int,
- struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
- struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
- struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
- unsigned long *, u16);
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};
-static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
-void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
-
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
+void bnxt_rdma_aux_device_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 8eb28e088582..bd116fd578d8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -12,8 +12,8 @@
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -356,14 +354,19 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* un-publish cfa_code_map so that RX path can't see it anymore */
kfree(bp->cfa_code_map);
bp->cfa_code_map = NULL;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- if (closed)
+ if (closed) {
+ /* Temporarily set legacy mode to avoid re-opening
+ * the representors, then restore switchdev mode afterwards.
+ */
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
bnxt_open_nic(bp, false, false);
- rtnl_unlock();
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ }
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of the netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -371,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -408,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
@@ -463,6 +466,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
struct net_device *pf_dev = bp->dev;
u16 max_mtu;
+ SET_NETDEV_DEV(dev, &bp->pdev->dev);
dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
/* Just inherit all the features of the parent PF as the VF-R
@@ -482,7 +486,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_vf_reps_create(struct bnxt *bp)
+int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
struct bnxt_vf_rep *vf_rep;
@@ -535,7 +539,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
netif_keep_dst(bp->dev);
return 0;
@@ -559,15 +562,13 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
+ int ret = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
@@ -578,25 +579,22 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
- if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
- }
- rc = bnxt_vf_reps_create(bp);
+ /* Create representors for existing VFs */
+ if (pci_num_vf(bp->pdev) > 0)
+ ret = bnxt_vf_reps_create(bp);
break;
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
+
+ if (!ret)
+ bp->eswitch_mode = mode;
+
+ return ret;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 5637a84884d7..33a965631d0b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -14,6 +14,7 @@
#define MAX_CFA_CODE 65536
+int bnxt_vf_reps_create(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
@@ -37,6 +38,11 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
#else
+static inline int bnxt_vf_reps_create(struct bnxt *bp)
+{
+ return 0;
+}
+
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c8083df5e0ab..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,43 +15,93 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
-#include <net/page_pool.h>
-#include "bnxt_hsi.h"
+#include <net/netdev_lock.h>
+#include <net/page_pool/helpers.h>
+#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_xdp.h"
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo;
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
- prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ /* fill up the first buffer */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
+
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now let us fill up the frags into the next buffers */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ WRITE_ONCE(txr->tx_prod, prod);
+
+ /* fill up the next buffer with this frag */
+ frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
+
return tx_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -61,25 +111,29 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
- dma_unmap_len_set(tx_buf, len, 0);
+ dma_unmap_len_set(tx_buf, len, len);
}
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ u16 tx_hw_cons = txr->tx_hw_cons;
bool rx_doorbell_needed = false;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int j, frags;
+
+ if (!budget)
+ return;
- for (i = 0; i < nr_pkts; i++) {
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ while (RING_TX(bp, tx_cons) != tx_hw_cons) {
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
@@ -92,31 +146,91 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf->action = 0;
tx_buf->xdpf = NULL;
} else if (tx_buf->action == XDP_TX) {
+ tx_buf->action = 0;
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
+ } else {
+ bnxt_sched_reset_txr(bp, txr, tx_cons);
+ return;
}
tx_cons = NEXT_TX(tx_cons);
}
- txr->tx_cons = tx_cons;
+
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
- tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
}
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 *data_ptr, unsigned int len,
+ struct xdp_buff *xdp)
+{
+ u32 buflen = BNXT_RX_PAGE_SIZE;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
+ }
+ shinfo->nr_frags = 0;
+}
+
/* returns the following:
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -126,19 +240,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
- txr = rxr->bnapi->tx_ring;
+ txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
- xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
- orig_data = xdp.data;
+ orig_data = xdp->data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -147,27 +255,41 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event &= BNXT_TX_CMP_EVENT;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -175,18 +297,16 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
- dma_unmap_page_attrs(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -195,12 +315,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_REDIRECT_EVENT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -227,11 +348,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
+ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
- if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+ if (!bnxt_tx_avail(bp, txr))
break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,21 +376,30 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_unlock(&txr->xdp_tx_lock);
+
return nxmit;
}
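
The static key keeps the XDP-TX spinlock off the fast path when every CPU owns a dedicated XDP ring; it only needs to be enabled once rings can be shared. A sketch of the policy this implies (the actual flip happens in the core driver at ring setup, so treat the function below as an illustration):

static void bnxt_xdp_tx_lock_policy(struct bnxt *bp)
{
	/* rings can be shared once there are fewer rings than CPUs */
	if (bp->tx_nr_rings_xdp < num_possible_cpus())
		static_branch_enable(&bnxt_xdp_locking_key);
	else
		static_branch_disable(&bnxt_xdp_locking_key);
}
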
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
- int tx_xdp = 0, rc, tc;
+ int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ netdev_assert_locked(dev);
+
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -272,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
@@ -290,19 +425,15 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- int rx, tx;
-
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
@@ -327,3 +458,18 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ BNXT_RX_PAGE_SIZE * num_frags,
+ xdp_buff_get_skb_flags(xdp));
+ return skb;
+}
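
A hypothetical caller shape for the helper above (rx_finish_mb is illustrative, not driver code): once the head skb exists, the frag pages recorded in the xdp_buff's shared info are folded in, and on failure the frags go back to the page pool via bnxt_xdp_buff_frags_free():

static struct sk_buff *rx_finish_mb(struct bnxt *bp, struct sk_buff *skb,
				    struct bnxt_rx_ring_info *rxr,
				    struct xdp_buff *xdp, u8 num_frags)
{
	skb = bnxt_xdp_build_skb(bp, skb, num_frags, rxr->page_pool, xdp);
	if (!skb)
		bnxt_xdp_buff_frags_free(rxr, xdp);	/* recycle frags */
	return skb;
}
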
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,15 +10,28 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
- u8 *event);
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 *data_ptr, unsigned int len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -660,7 +661,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +670,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
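
bitmap_zalloc()/bitmap_free() are the idiomatic replacements for the open-coded kcalloc(BITS_TO_LONGS(size), sizeof(long), ...) pattern they supersede here; the usual bitops keep working unchanged. A minimal usage sketch (id_map_demo is illustrative):

static int id_map_demo(u32 size, u32 id)
{
	unsigned long *map = bitmap_zalloc(size, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	set_bit(id, map);		/* same bitops as before */
	bitmap_free(map);
	return 0;
}
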
@@ -1027,16 +1028,14 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
- &udev->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_ring_map, GFP_KERNEL);
if (!udev->l2_ring)
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
- &udev->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_buf_map, GFP_KERNEL);
if (!udev->l2_buf) {
__cnic_free_uio_rings(udev);
return -ENOMEM;
@@ -1109,10 +1108,11 @@ static int cnic_init_uio(struct cnic_dev *dev)
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
CNIC_PAGE_MASK;
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
else
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -1120,20 +1120,26 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
CNIC_PAGE_MASK;
- uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
+ uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
uinfo->name = "bnx2x_cnic";
}
- uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[1].dma_device = &dev->pcidev->dev;
+ uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
- uinfo->mem[2].size = udev->l2_ring_size;
- uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[2].dma_addr = udev->l2_ring_map;
+ uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
+ uinfo->mem[2].dma_device = &dev->pcidev->dev;
+ uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
- uinfo->mem[3].size = udev->l2_buf_size;
- uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[3].dma_addr = udev->l2_buf_map;
+ uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
+ uinfo->mem[3].dma_device = &dev->pcidev->dev;
+ uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
@@ -1315,6 +1321,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
return 0;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+ cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
cp->l2_rx_ring_size = 15;
@@ -3009,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3030,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3134,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -3676,7 +3683,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;
- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
@@ -4105,8 +4113,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
+ port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
@@ -4165,7 +4172,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = prandom_u32();
+ seed = get_random_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
@@ -4223,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
@@ -4422,7 +4428,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4435,7 +4441,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
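
The conversion pattern used throughout this file: the tasklet becomes a work item queued on system_bh_wq, which still executes in softirq context, and from_work() replaces from_tasklet(). The generic shape (struct foo and its helpers are illustrative):

struct foo {
	struct work_struct bh_work;
};

static void foo_bh_work(struct work_struct *work)
{
	struct foo *f = from_work(f, work, bh_work);

	/* former tasklet body runs here, still in BH context */
}

static void foo_setup(struct foo *f)
{
	INIT_WORK(&f->bh_work, foo_bh_work);	/* was tasklet_setup() */
}

static void foo_kick(struct foo *f)
{
	queue_work(system_bh_wq, &f->bh_work);	/* was tasklet_schedule() */
}
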
@@ -4458,7 +4464,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4867,7 +4873,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
@@ -5326,6 +5332,7 @@ static int cnic_start_hw(struct cnic_dev *dev)
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 4baea81bae7a..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -260,6 +260,7 @@ struct cnic_local {
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
+ dma_addr_t status_blk_map;
struct host_sp_status_block *bnx2x_def_status_blk;
@@ -267,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 789e5c7e9311..49a11ec80b36 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -190,6 +190,7 @@ struct cnic_ops {
struct cnic_irq {
unsigned int vector;
void *status_blk;
+ dma_addr_t status_blk_map;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..05512aa10c20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -35,21 +35,18 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
-#include <linux/platform_data/bcmgenet.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +101,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -117,24 +114,6 @@ static inline void dmadesc_set(struct bcmgenet_priv *priv,
dmadesc_set_length_status(priv, d, val);
}
-static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
- void __iomem *d)
-{
- dma_addr_t addr;
-
- addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
-
- /* Register writes to GISB bus can take couple hundred nanoseconds
- * and are done for each packet, save these expensive writes unless
- * the platform is explicitly configured for 64-bits/LPAE.
- */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
- addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
-#endif
- return addr;
-}
-
#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
@@ -464,33 +443,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -500,6 +494,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -513,9 +510,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -597,13 +598,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -685,19 +686,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
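
The queue selection above now always enables the filter and encodes the drop case as a queue number. Restated on its own, with ring 0 as the wake-up/default ring and ring rx_queues + 1 as the discard sink (hfb_cookie_to_queue is an illustrative helper):

static u32 hfb_cookie_to_queue(struct bcmgenet_priv *priv, u64 cookie)
{
	if (cookie == RX_CLS_FLOW_WAKE)
		return 0;				/* default ring */
	if (cookie == RX_CLS_FLOW_DISC)
		return priv->hw_params->rx_queues + 1;	/* discard */
	return (u32)cookie;				/* direct mapped */
}
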
/* bcmgenet_hfb_clear
@@ -708,6 +706,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -717,22 +716,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -740,9 +740,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -837,20 +834,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -920,17 +913,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -979,12 +968,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -994,13 +984,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -1010,6 +1002,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
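/*
 * The stat tables above locate each counter with offsetof() so one
 * generic loop can service every entry. A minimal userspace sketch of
 * the same technique (demo struct and names are hypothetical):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_priv {
	uint64_t rx_pkts;
	uint64_t tx_pkts;
};

struct demo_stat {
	const char *name;
	size_t offset;
};

static const struct demo_stat demo_stats[] = {
	{ "rx_pkts", offsetof(struct demo_priv, rx_pkts) },
	{ "tx_pkts", offsetof(struct demo_priv, tx_pkts) },
};

int main(void)
{
	struct demo_priv p = { .rx_pkts = 3, .tx_pkts = 5 };
	const char *base = (const char *)&p;
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)*(const uint64_t *)(base + demo_stats[i].offset));
	return 0;
}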
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1024,18 +1024,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
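/*
 * The per-queue stat names above are assembled at compile time by
 * passing the queue number through __stringify(). A tiny standalone
 * sketch of the same two-step stringification trick:
 */
#include <stdio.h>

#define STR_(x)	#x
#define STR(x)	STR_(x)		/* expand x first, like __stringify() */
#define QSTAT_NAME(q)	"txq" STR(q) "_packets"

int main(void)
{
	puts(QSTAT_NAME(4));	/* prints "txq4_packets" */
	return 0;
}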
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1047,15 +1067,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1138,15 +1163,29 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
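/*
 * These macros form the writer side of the u64_stats protocol; a
 * reader must retry if it races an update. A minimal sketch of the
 * matching reader loop (example_stats is hypothetical):
 */
#include <linux/u64_stats_sync.h>

struct example_stats {			/* illustrative only */
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

static u64 example_stats_read_packets(const struct example_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}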
+
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1162,14 +1201,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmgenet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1226,8 +1265,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1265,32 +1305,45 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1310,7 +1363,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
- if (enable)
+ if (tx_lpi_enabled)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1330,13 +1383,13 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
}
priv->eee.eee_enabled = enable;
- priv->eee.eee_active = enable;
+ priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1344,18 +1397,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
- int ret = 0;
+ struct ethtool_keee *p = &priv->eee;
+ bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1366,16 +1418,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
- bcmgenet_eee_enable_set(dev, false);
+ bcmgenet_eee_enable_set(dev, false, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(priv, hw, dev, "EEE initialization failed\n");
- return ret;
- }
-
+ active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, true);
+ bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
@@ -1387,7 +1434,8 @@ static int bcmgenet_validate_flow(struct net_device *dev,
struct ethtool_usrip4_spec *l4_mask;
struct ethhdr *eth_mask;
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
+ cmd->fs.location != RX_CLS_LOC_ANY) {
netdev_err(dev, "rxnfc: Invalid location (%d)\n",
cmd->fs.location);
return -EINVAL;
@@ -1452,7 +1500,7 @@ static int bcmgenet_insert_flow(struct net_device *dev,
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *loc_rule;
- int err;
+ int err, i;
if (priv->hw_params->hfb_filter_size < 128) {
netdev_err(dev, "rxnfc: Not supported by this device\n");
@@ -1460,7 +1508,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1470,12 +1519,34 @@ static int bcmgenet_insert_flow(struct net_device *dev,
if (err)
return err;
- loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (cmd->fs.location == RX_CLS_LOC_ANY) {
+ list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
+ cmd->fs.location = loc_rule->fs.location;
+ err = memcmp(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+ if (!err)
+ /* rule exists so return current location */
+				/* rule exists, so return current location */
+ }
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ loc_rule = &priv->rxnfc_rules[i];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ cmd->fs.location = i;
+ break;
+ }
+ }
+ if (i == MAX_NUM_OF_FS_RULES) {
+ cmd->fs.location = RX_CLS_LOC_ANY;
+ return -ENOSPC;
+ }
+ } else {
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ }
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1576,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1569,6 +1640,13 @@ static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
return res;
}
+static u32 bcmgenet_get_rx_ring_count(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->hw_params->rx_queues ?: 1;
+}
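/*
 * The "?:" above is the GNU elvis operator: a ?: b yields a when a is
 * nonzero, else b, evaluating a only once. A standalone equivalent:
 */
#include <stdio.h>

static unsigned int ring_count(unsigned int rx_queues)
{
	return rx_queues ? rx_queues : 1;	/* same as rx_queues ?: 1 */
}

int main(void)
{
	printf("%u %u\n", ring_count(0), ring_count(4));	/* "1 4" */
	return 0;
}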
+
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1578,12 +1656,9 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int i = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->hw_params->rx_queues ?: 1;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
- cmd->data = MAX_NUM_OF_FS_RULES;
+ cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRULE:
err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
@@ -1629,6 +1704,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_rx_ring_count = bcmgenet_get_rx_ring_count,
.get_pauseparam = bcmgenet_get_pauseparam,
.set_pauseparam = bcmgenet_set_pauseparam,
};
@@ -1651,9 +1727,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1752,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1767,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1795,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1838,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1852,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1868,6 +1923,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1877,12 +1933,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1913,22 +1964,51 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
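/*
 * The producer and consumer indices above are free-running counters
 * that wrap under a hardware mask, so the number of in-flight
 * descriptors is their masked difference. A sketch (mask width
 * assumed, mirroring DMA_C_INDEX_MASK):
 */
#include <stdint.h>

#define SKETCH_C_INDEX_MASK	0xffffu	/* assumed 16-bit index space */

static unsigned int bds_in_flight(uint32_t prod_index, uint32_t c_index)
{
	return (prod_index - c_index) & SKETCH_C_INDEX_MASK;
}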
@@ -1944,14 +2024,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,22 +2042,21 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -1994,7 +2073,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2035,6 +2114,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
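/*
 * The TSB is prepended to the frame for the hardware, then pulled back
 * off before skb_tx_timestamp() so timestamping and packet taps never
 * see driver-private bytes. A hedged kernel-style sketch of that
 * push/pull pairing (demo struct is illustrative; sufficient headroom
 * is assumed):
 */
#include <linux/skbuff.h>
#include <linux/string.h>

static void tsb_push_pull_demo(struct sk_buff *skb)
{
	struct tsb_demo_block { u32 words[16]; } *tsb;	/* 64 bytes, like the TSB */

	tsb = skb_push(skb, sizeof(*tsb));	/* prepend room for the block */
	memset(tsb, 0, sizeof(*tsb));
	/* ...hardware descriptors would point at skb->data here... */
	__skb_pull(skb, sizeof(*tsb));		/* hide it from the stack again */
}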
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2052,30 +2136,21 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+	 * queue_mapping = 0, unclassified, packet transmitted through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
spin_lock(&ring->lock);
if (ring->free_bds <= (nr_frags + 1)) {
- if (!netif_tx_queue_stopped(txq)) {
+ if (!netif_tx_queue_stopped(txq))
netif_tx_stop_queue(txq);
- netdev_err(dev,
- "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
- }
ret = NETDEV_TX_BUSY;
goto out;
}
@@ -2086,7 +2161,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2129,8 +2204,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
+ len_stat |= DMA_TX_APPEND_CRC;
+
if (!i) {
- len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+ len_stat |= DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
len_stat |= DMA_TX_DO_CSUM;
}
@@ -2141,6 +2218,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -2224,6 +2303,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2237,15 +2317,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2253,7 +2326,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2279,7 +2352,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2287,8 +2360,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (rx_csum) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -2302,10 +2377,17 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ BCMGENET_STATS64_INC(stats, length_errors);
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2318,15 +2400,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
@@ -2346,10 +2435,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -2382,15 +2476,13 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- ring->int_enable(ring);
- }
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ bcmgenet_rx_ring_int_enable(ring);
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring->dim.bytes, &dim_sample);
- net_dim(&ring->dim.dim, dim_sample);
+ net_dim(&ring->dim.dim, &dim_sample);
}
return work_done;
@@ -2452,14 +2544,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
	/* UniMAC stops on a packet boundary; wait for a full-size packet
* to be processed
@@ -2475,8 +2571,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2502,7 +2600,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2567,8 +2665,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2618,15 +2716,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2637,8 +2726,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2662,8 +2751,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
}
/* Initialize a RDMA ring */
@@ -2677,13 +2765,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2699,8 +2780,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
@@ -2730,15 +2810,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2746,13 +2822,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2760,82 +2833,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
+
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
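/*
 * The mask built above covers the per-ring buffer-enable bits for
 * queues 0..tx_queues plus the global DMA_EN bit. A pure-arithmetic
 * sketch (bit positions assumed, mirroring DMA_RING_BUF_EN_SHIFT and
 * DMA_EN):
 */
#include <stdint.h>

#define SKETCH_DMA_EN			(1u << 0)	/* assumed bit 0 */
#define SKETCH_RING_BUF_EN_SHIFT	1		/* assumed shift */

static uint32_t dma_disable_mask(unsigned int queues)
{
	uint32_t rings = (1u << (queues + 1)) - 1;	/* rings 0..queues */

	return (rings << SKETCH_RING_BUF_EN_SHIFT) | SKETCH_DMA_EN;
}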
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+ return -ETIMEDOUT;
}
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
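/*
 * A runnable sketch of the descriptor partitioning this function sets
 * up: queue 0 takes the large default slice first, then each priority
 * queue takes tx_bds_per_q in order (counts assume the 256-descriptor
 * layout described above):
 */
#include <stdio.h>

int main(void)
{
	unsigned int start = 0, end = 128;	/* GENET_Q0_TX_BD_CNT */
	unsigned int q, tx_bds_per_q = 32;

	for (q = 0; q <= 4; q++) {
		printf("txq%u: tx_cbs[%u..%u]\n", q, start, end - 1);
		start = end;
		end += tx_bds_per_q;
	}
	return 0;
}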
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2843,15 +2938,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2859,15 +2950,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2875,13 +2962,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2889,57 +2973,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
-
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2947,26 +3006,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
	/* Disable TDMA to stop adding more frames to the TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+	if (bcmgenet_tdma_disable(priv) == -ETIMEDOUT) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2975,39 +3017,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+	if (bcmgenet_rdma_disable(priv) == -ETIMEDOUT) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3019,32 +3033,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3094,6 +3129,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3123,7 +3167,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3142,7 +3186,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3150,20 +3194,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3171,12 +3215,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle miscellaneous (non-queue) interrupts */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3190,29 +3232,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3234,23 +3255,6 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bcmgenet_poll_controller(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- /* Invoke the main RX/TX interrupt handler */
- disable_irq(priv->irq0);
- bcmgenet_isr0(priv->irq0, priv);
- enable_irq(priv->irq0);
-
- /* And the interrupt handler for RX/TX priority queues */
- disable_irq(priv->irq1);
- bcmgenet_isr1(priv->irq1, priv);
- enable_irq(priv->irq1);
-}
-#endif
-
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -3283,54 +3287,14 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3346,7 +3310,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3372,22 +3335,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3430,22 +3387,25 @@ err_clk_disable:
return ret;
}
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
	/* Disable MAC transmit; TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3466,7 +3426,7 @@ static int bcmgenet_close(struct net_device *dev)
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
- bcmgenet_netif_stop(dev);
+ bcmgenet_netif_stop(dev, false);
/* Really kill the PHY state machine and disconnect from it */
phy_disconnect(dev->phydev);
@@ -3493,16 +3453,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3516,7 +3471,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3529,30 +3484,25 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
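The BCMGENET_STATS64_INC() helper used in bcmgenet_timeout() is defined in a hunk outside this view. A plausible minimal form, assuming only the bcmgenet_tx_stats64/bcmgenet_rx_stats64 structs added to bcmgenet.h further down, would be:

#define BCMGENET_STATS64_INC(stats, m)				\
	do {							\
		/* no-op on 64-bit, seqcount bump on 32-bit */	\
		u64_stats_update_begin(&(stats)->syncp);	\
		u64_stats_inc(&(stats)->m);			\
		u64_stats_update_end(&(stats)->syncp);		\
	} while (0)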
@@ -3590,16 +3540,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number of filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -3638,47 +3591,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
-
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ u64 multicast;
+
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
-
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
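The fetch/retry loop above is the canonical u64_stats reader: u64_stats_fetch_begin()/u64_stats_fetch_retry() compile to nothing on 64-bit kernels and take a seqcount on 32-bit ones, so a reader can never observe a torn 64-bit counter. Reduced to a single counter (hypothetical helper name):

static u64 bcmgenet_read_tx_packets(const struct bcmgenet_tx_stats64 *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}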
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3706,135 +3684,113 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bcmgenet_poll_controller,
-#endif
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3885,7 +3841,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3910,7 +3866,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3921,32 +3877,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
@@ -3963,7 +3930,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
- struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
@@ -3990,6 +3956,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -3997,6 +3967,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
@@ -4018,12 +3989,16 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
@@ -4037,10 +4012,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
- } else {
- priv->version = pd->genet_version;
- priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
+ priv->flags = pdata->flags;
}
priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
@@ -4057,7 +4029,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4090,16 +4062,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- eth_hw_addr_set(dev, pd->mac_address);
- else
- if (device_get_ethdev_address(&pdev->dev, dev))
- if (has_acpi_companion(&pdev->dev)) {
- u8 addr[ETH_ALEN];
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
- bcmgenet_get_hw_addr(priv, addr);
- eth_hw_addr_set(dev, addr);
- }
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
@@ -4112,16 +4081,19 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
+
+ /* Initialize u64 stats seq counter for 32bit machines */
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
+ u64_stats_init(&priv->rx_rings[i].stats64.syncp);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
+ u64_stats_init(&priv->tx_rings[i].stats64.syncp);
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4144,7 +4116,7 @@ err:
return err;
}
-static int bcmgenet_remove(struct platform_device *pdev)
+static void bcmgenet_remove(struct platform_device *pdev)
{
struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
@@ -4152,8 +4124,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
unregister_netdev(priv->dev);
bcmgenet_mii_exit(priv->dev);
free_netdev(priv->dev);
-
- return 0;
}
static void bcmgenet_shutdown(struct platform_device *pdev)
@@ -4187,9 +4157,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4199,23 +4182,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4236,25 +4242,16 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
- if (priv->eee.eee_enabled)
- bcmgenet_eee_enable_set(dev, true);
-
bcmgenet_netif_start(dev);
netif_device_attach(dev);
@@ -4272,19 +4269,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (bcmgenet_tdma_disable(priv) == -ETIMEDOUT)
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
@@ -4335,7 +4365,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove = bcmgenet_remove,
+ .remove = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 946f6e283c4e..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -152,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -271,6 +298,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +505,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +526,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -510,10 +539,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +550,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -538,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
@@ -553,8 +575,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -573,6 +593,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
@@ -581,7 +603,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -591,10 +613,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -613,7 +636,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -641,13 +663,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -700,7 +746,10 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
+
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+ bool tx_lpi_enabled);
#endif /* __BCMGENET_H__ */
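One cross-cutting change worth spelling out: the new reg_lock serializes read-modify-write sequences on registers shared between contexts, most visibly UMAC_CMD, which is now updated under the lock from bcmgenet_set_rx_mode(), from the PHY state machine via bcmgenet_mac_config(), and from the WoL suspend/resume paths. A minimal sketch of the pattern (the helper name is hypothetical; the driver open-codes the sequence at each site):

static void bcmgenet_umac_cmd_rmw(struct bcmgenet_priv *priv,
				  u32 clear, u32 set)
{
	u32 reg;

	spin_lock_bh(&priv->reg_lock);
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~clear;
	reg |= set;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	spin_unlock_bh(&priv->reg_lock);
}

Sites that may run in softirq context take the _bh variant, as bcmgenet_wol_power_down_cfg() below does; bcmgenet_set_rx_mode() gets away with plain spin_lock() since ndo_set_rx_mode callbacks already run with bottom halves disabled under netif_addr_lock_bh().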
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index e31a5a397f11..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -40,11 +40,28 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ if (dev->phydev) {
+ phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
+
+ /* MAC is not wake-up capable; return what the PHY does */
+ if (!device_can_wakeup(kdev))
+ return;
+
+ /* Overlay MAC capabilities with those of the PHY queried above */
+ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ wol->wolopts |= priv->wolopts;
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
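The effect of the reordering above: Wake-on-LAN queries now start from the PHY via phy_ethtool_get_wol(), the MAC's WAKE_MAGIC/WAKE_MAGICSECURE/WAKE_FILTER capabilities are only overlaid when the platform device itself is wakeup-capable, and a PHY-configured SecureOn password takes precedence over the one stored in the MAC driver. The matching bcmgenet_set_wol() below likewise offers the request to the PHY first and only arms the MAC machinery when the PHY cannot service it.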
@@ -56,6 +73,14 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ int ret;
+
+ /* Try Wake-on-LAN from the PHY first */
+ if (dev->phydev) {
+ ret = phy_ethtool_set_wol(dev->phydev, wol);
+ if (ret != -EOPNOTSUPP && wol->wolopts)
+ return ret;
+ }
if (!device_can_wakeup(kdev))
return -ENOTSUPP;
@@ -70,14 +95,18 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
+ if (priv->wol_irq_disabled) {
enable_irq_wake(priv->wol_irq);
+ enable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
+ if (!priv->wol_irq_disabled) {
disable_irq_wake(priv->wol_irq);
+ disable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = true;
}
@@ -116,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -125,16 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -146,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -167,27 +180,30 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -195,43 +211,57 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
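Note how PHY handling replaces the old wol_active flag: bcmgenet_wol_power_down_cfg() parks the PHY state machine at PHY_READY under phydev->lock, so phylib stops updating the link while the MAC keeps its receiver enabled for wake-pattern matching, and the power-up path above restores PHY_RUNNING or PHY_NOLINK from the last known link state. This pairs with the mac_managed_pm flag set in bcmmii.c below, which tells phylib that the MAC driver owns PHY power management across suspend/resume.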
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5f259641437a..38f854b94a79 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/platform_data/bcmgenet.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#include "bcmgenet.h"
@@ -30,6 +29,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
+ bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -72,10 +72,10 @@ static void bcmgenet_mac_config(struct net_device *dev)
* Receive clock is provided by the PHY.
*/
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +88,12 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
+
+ active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+ priv->eee.eee_enabled && active,
+ priv->eee.tx_lpi_enabled);
}
/* setup netdev link state when PHY link status change and
@@ -95,10 +101,18 @@ static void bcmgenet_mac_config(struct net_device *dev)
*/
void bcmgenet_mii_setup(struct net_device *dev)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
+ u32 reg;
- if (phydev->link)
+ if (phydev->link) {
bcmgenet_mac_config(dev);
+ } else {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
phy_print_status(phydev);
}
@@ -139,7 +153,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -154,10 +168,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
@@ -169,16 +188,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- u32 reg;
-
- if (!GENET_IS_V5(priv)) {
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
- }
-
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
@@ -210,6 +220,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
if (!phy_name) {
phy_name = "MoCA";
+ if (!GENET_IS_V5(priv))
+ port_ctrl |= LED_ACT_SOURCE_MAC;
bcmgenet_moca_phy_setup(priv);
}
break;
@@ -266,18 +278,22 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
/* This is an external PHY (xMII), so we need to enable the RGMII
- * block for the interface to work
+ * block for the interface to work; unconditionally clear the
+ * out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;
else
reg |= RGMII_MODE_EN;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
@@ -393,6 +409,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
@@ -416,23 +435,6 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
return priv->mdio_dn;
}
-static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv,
- struct unimac_mdio_pdata *ppd)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- /*
- * Internal or external PHY with MDIO access
- */
- if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
- ppd->phy_mask = 1 << pd->phy_address;
- else
- ppd->phy_mask = 0;
- }
-}
-
static int bcmgenet_mii_wait(void *wait_func_data)
{
struct bcmgenet_priv *priv = wait_func_data;
@@ -447,7 +449,6 @@ static int bcmgenet_mii_wait(void *wait_func_data)
static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
{
struct platform_device *pdev = priv->pdev;
- struct bcmgenet_platform_data *pdata = pdev->dev.platform_data;
struct device_node *dn = pdev->dev.of_node;
struct unimac_mdio_pdata ppd;
struct platform_device *ppdev;
@@ -465,6 +466,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -487,8 +492,6 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppdev->dev.parent = &pdev->dev;
if (dn)
ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
- else if (pdata)
- bcmgenet_mii_pdata_init(priv, &ppd);
else
ppd.phy_mask = ~0;
@@ -570,58 +573,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
return 0;
}
-static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
-{
- struct device *kdev = &priv->pdev->dev;
- struct bcmgenet_platform_data *pd = kdev->platform_data;
- char phy_name[MII_BUS_ID_SIZE + 3];
- char mdio_bus_id[MII_BUS_ID_SIZE];
- struct phy_device *phydev;
-
- snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
- UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
-
- if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
- mdio_bus_id, pd->phy_address);
-
- /*
- * Internal or external PHY with MDIO access
- */
- phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (!phydev) {
- dev_err(kdev, "failed to register PHY device\n");
- return -ENODEV;
- }
- } else {
- /*
- * MoCA port or no MDIO access.
- * Use fixed PHY to represent the link layer.
- */
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = pd->phy_speed,
- .duplex = pd->phy_duplex,
- .pause = 0,
- .asym_pause = 0,
- };
-
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (!phydev || IS_ERR(phydev)) {
- dev_err(kdev, "failed to register fixed PHY device\n");
- return -ENODEV;
- }
-
- /* Make sure we initialize MoCA PHYs with a link down */
- phydev->link = 0;
-
- }
-
- priv->phy_interface = pd->phy_interface;
-
- return 0;
-}
-
static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
@@ -632,7 +583,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
else if (has_acpi_companion(kdev))
return bcmgenet_phy_interface_init(priv);
else
- return bcmgenet_mii_pd_init(priv);
+ return -EINVAL;
}
int bcmgenet_mii_init(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..30865fe03eeb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n) (K_INT_MAC_0 + (n))
@@ -1527,7 +1527,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
* Turn on the rest of the bits in the enable register
*/
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
__raw_writeq(M_MAC_RXDMA_EN0 |
M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
@@ -2205,7 +2203,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
dev->min_mtu = 0;
dev->max_mtu = ENET_PACKET_SIZE;
- netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+ netif_napi_add_weight(dev, &sc->napi, sbmac_poll, 16);
dev->irq = UNIT_INT(idx);
@@ -2536,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
@@ -2590,7 +2593,7 @@ out_out:
return err;
}
-static int sbmac_remove(struct platform_device *pldev)
+static void sbmac_remove(struct platform_device *pldev)
{
struct net_device *dev = platform_get_drvdata(pldev);
struct sbmac_softc *sc = netdev_priv(dev);
@@ -2601,8 +2604,6 @@ static int sbmac_remove(struct platform_device *pldev)
mdiobus_free(sc->mii_bus);
iounmap(sc->sbm_base);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver sbmac_driver = {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b1328c5524b5..75f66587983d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,9 +54,11 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
+#include <net/gso.h>
#include <net/ip.h>
#include <linux/io.h>
@@ -220,10 +222,11 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
@@ -1537,8 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus";
- snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
- (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
@@ -2337,10 +2339,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2361,13 +2363,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
@@ -3736,7 +3738,7 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
}
do {
- u32 *fw_data = (u32 *)(fw_hdr + 1);
+ __be32 *fw_data = (__be32 *)(fw_hdr + 1);
for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
write_op(tp, cpu_scratch_base +
(be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
@@ -4018,7 +4020,7 @@ static int tg3_power_up(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *, bool);
-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4262,7 +4264,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
- return 0;
+ return;
}
static void tg3_power_down(struct tg3 *tp)
@@ -4353,23 +4355,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4617,7 +4608,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4625,13 +4616,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
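Since ethtool_keee carries link-mode bitmaps instead of the old u32 advertising masks, the comparisons above go through the linkmode helpers. A small self-contained illustration (the function is hypothetical; the helpers and register bits are the in-tree ones):

static bool tg3_example_eee_adv(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};

	/* MDIO_AN_EEE_ADV register bits -> ethtool link modes */
	mii_eee_cap1_mod_linkmode_t(adv, MDIO_EEE_100TX | MDIO_EEE_1000T);

	return linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv);
}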
@@ -5503,7 +5494,6 @@ static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
int workaround, port_a;
serdes_cfg = 0;
- expected_sg_dig_ctrl = 0;
workaround = 0;
port_a = 1;
current_link_up = false;
@@ -5813,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
- u32 local_adv, remote_adv, sgsr;
+ u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5954,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
- local_adv = 0;
- remote_adv = 0;
-
if (bmcr & BMCR_ANENABLE) {
u32 common;
@@ -6152,13 +6139,11 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval)
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
-static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6168,8 +6153,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
@@ -6180,34 +6163,26 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return 0;
}
-static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
- bool neg_adj = false;
- u32 correction = 0;
-
- if (ppb < 0) {
- neg_adj = true;
- ppb = -ppb;
- }
+ u64 correction;
+ bool neg_adj;
/* Frequency adjustment is performed using hardware with a 24 bit
* accumulator and a programmable correction value. On each clk, the
* correction value gets added to the accumulator and when it
* overflows, the time counter is incremented/decremented.
- *
- * So conversion from ppb to correction value is
- * ppb * (1 << 24) / 1000000000
*/
- correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
- TG3_EAV_REF_CLK_CORRECT_MASK;
+ neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
tg3_full_lock(tp, 0);
if (correction)
tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
TG3_EAV_REF_CLK_CORRECT_EN |
- (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
+ ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
else
tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
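A worked example of the conversion: +10 ppm arrives from the PTP core as scaled_ppm = 10 * 2^16 = 655360, and diff_by_scaled_ppm(1 << 24, ...) yields correction = (1 << 24) * 655360 / (1000000 << 16) = 167, the same value the removed adjfreq callback computed as ppb * (1 << 24) / 10^9 with ppb = 10000. Spelled out, the helper does roughly this (a sketch of the in-tree diff_by_scaled_ppm() semantics):

static bool tg3_scaled_ppm_to_corr(long scaled_ppm, u64 *corr)
{
	bool neg = scaled_ppm < 0;

	if (neg)
		scaled_ppm = -scaled_ppm;
	/* base * |scaled_ppm| / (10^6 * 2^16), with base = 1 << 24 */
	*corr = mul_u64_u64_div_u64(1 << 24, scaled_ppm, 1000000ULL << 16);
	return neg;
}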
@@ -6322,6 +6297,46 @@ err_out:
return -EOPNOTSUPP;
}
+static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
+ struct skb_shared_hwtstamps *timestamp)
+{
+ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
+ timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
+ tp->ptp_adjust);
+}
+
+static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
+{
+ *hwclock = tr32(TG3_TX_TSTAMP_LSB);
+ *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+}
+
+static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
+{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ struct skb_shared_hwtstamps timestamp;
+ u64 hwclock;
+
+ if (tp->ptp_txts_retrycnt > 2)
+ goto done;
+
+ tg3_read_tx_tstamp(tp, &hwclock);
+
+ if (hwclock != tp->pre_tx_ts) {
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
+ goto done;
+ }
+ tp->ptp_txts_retrycnt++;
+ return HZ / 10;
+done:
+ dev_consume_skb_any(tp->tx_tstamp_skb);
+ tp->tx_tstamp_skb = NULL;
+ tp->ptp_txts_retrycnt = 0;
+ tp->pre_tx_ts = 0;
+ return -1;
+}
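The aux worker above covers hardware that latches the TX timestamp late: if TG3_TX_TSTAMP_LSB/MSB still hold the previous stamp (tp->pre_tx_ts) when the descriptor completes, tg3_tx() parks the skb in tp->tx_tstamp_skb and schedules this worker, which re-reads the registers every HZ/10 tick, delivers the stamp via skb_tstamp_tx() once it changes, and gives up after a few retries so the parked skb is always released.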
+
static const struct ptp_clock_info tg3_ptp_caps = {
.owner = THIS_MODULE,
.name = "tg3 clock",
@@ -6331,21 +6346,14 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.n_per_out = 1,
.n_pins = 0,
.pps = 0,
- .adjfreq = tg3_ptp_adjfreq,
+ .adjfine = tg3_ptp_adjfine,
.adjtime = tg3_ptp_adjtime,
+ .do_aux_work = tg3_ptp_ts_aux_work,
.gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
-static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
- struct skb_shared_hwtstamps *timestamp)
-{
- memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
- timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
- tp->ptp_adjust);
-}
-
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
@@ -6376,6 +6384,8 @@ static void tg3_ptp_fini(struct tg3 *tp)
ptp_clock_unregister(tp->ptp_clock);
tp->ptp_clock = NULL;
tp->ptp_adjust = 0;
+ dev_consume_skb_any(tp->tx_tstamp_skb);
+ tp->tx_tstamp_skb = NULL;
}
static inline int tg3_irq_sync(struct tg3 *tp)
@@ -6447,6 +6457,14 @@ static void tg3_dump_state(struct tg3 *tp)
int i;
u32 *regs;
+ /* If it is a PCI error, all registers will be 0xffff,
+ * we don't dump them out, just report the error and return
+ */
+ if (tp->pdev->error_state != pci_channel_io_normal) {
+ netdev_err(tp->dev, "PCI channel ERROR!\n");
+ return;
+ }
+
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
if (!regs)
return;
@@ -6546,6 +6564,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
while (sw_idx != hw_idx) {
struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ bool complete_skb_later = false;
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -6556,12 +6575,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
struct skb_shared_hwtstamps timestamp;
- u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
- hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
-
- tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ u64 hwclock;
- skb_tstamp_tx(skb, &timestamp);
+ tg3_read_tx_tstamp(tp, &hwclock);
+ if (hwclock != tp->pre_tx_ts) {
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ skb_tstamp_tx(skb, &timestamp);
+ tp->pre_tx_ts = 0;
+ } else {
+ tp->tx_tstamp_skb = skb;
+ complete_skb_later = true;
+ }
}
dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
@@ -6599,7 +6623,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_consume_skb_any(skb);
+ if (!complete_skb_later)
+ dev_consume_skb_any(skb);
+ else
+ ptp_schedule_worker(tp->ptp_clock, 0);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -6611,9 +6638,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
tnapi->tx_cons = sw_idx;
- /* Need to make the tx_cons update visible to tg3_start_xmit()
+ /* Need to make the tx_cons update visible to __tg3_start_xmit()
* before checking for netif_queue_stopped(). Without the
- * memory barrier, there is a small possibility that tg3_start_xmit()
+ * memory barrier, there is a small possibility that __tg3_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
@@ -6656,7 +6683,7 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* We only need to fill in the address because the other members
* of the RX descriptor are invariant, see tg3_init_rings.
*
- * Note the purposeful assymetry of cpu vs. chip accesses. For
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
* posting buffers we only dirty the first cache line of the RX
* descriptor (containing the address). Whereas for the RX status
* buffers the cpu only reads the last cacheline of the RX descriptor
@@ -6853,7 +6880,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
- tp->rx_dropped++;
+ tnapi->rx_dropped++;
goto next_pkt;
}
@@ -6888,7 +6915,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
ri->data = NULL;
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
tg3_frag_free(frag_size != 0, data);
goto drop_it_no_recycle;
@@ -7363,27 +7393,61 @@ tx_recovery:
static void tg3_napi_disable(struct tg3 *tp)
{
+ int txq_idx = tp->txq_cnt - 1;
+ int rxq_idx = tp->rxq_cnt - 1;
+ struct tg3_napi *tnapi;
int i;
- for (i = tp->irq_cnt - 1; i >= 0; i--)
- napi_disable(&tp->napi[i].napi);
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ txq_idx--;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ rxq_idx--;
+ }
+ napi_disable(&tnapi->napi);
+ }
}
static void tg3_napi_enable(struct tg3 *tp)
{
+ int txq_idx = 0, rxq_idx = 0;
+ struct tg3_napi *tnapi;
int i;
- for (i = 0; i < tp->irq_cnt; i++)
- napi_enable(&tp->napi[i].napi);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ tnapi = &tp->napi[i];
+ napi_enable_locked(&tnapi->napi);
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &tnapi->napi);
+ txq_idx++;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX,
+ &tnapi->napi);
+ rxq_idx++;
+ }
+ }
}
static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
- for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
+ }
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -7850,7 +7914,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}
-static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
* indicated in tg3_tx_frag_set()
@@ -7879,12 +7943,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
segs = skb_gso_segment(skb, tp->dev->features &
~(NETIF_F_TSO | NETIF_F_TSO6));
- if (IS_ERR(segs) || !segs)
+ if (IS_ERR(segs) || !segs) {
+ tnapi->tx_dropped++;
goto tg3_tso_bug_end;
+ }
skb_list_walk_safe(segs, seg, next) {
skb_mark_not_on_list(seg);
- tg3_start_xmit(seg, tp->dev);
+ __tg3_start_xmit(seg, tp->dev);
}
tg3_tso_bug_end:
@@ -7894,7 +7960,7 @@ tg3_tso_bug_end:
}
/* hard_start_xmit for all devices */
-static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
u32 len, entry, base_flags, mss, vlan = 0;
@@ -7945,7 +8011,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
@@ -8033,8 +8099,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
tg3_flag(tp, TX_TSTAMP_EN)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- base_flags |= TXD_FLAG_HWTSTAMP;
+ tg3_full_lock(tp, 0);
+ if (!tp->pre_tx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ base_flags |= TXD_FLAG_HWTSTAMP;
+ tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
+ }
+ tg3_full_unlock(tp);
}
len = skb_headlen(skb);
@@ -8138,11 +8209,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
- /* Packets are ready, update Tx producer idx on card. */
- tw32_tx_mbox(tnapi->prodmbox, entry);
- }
-
return NETDEV_TX_OK;
dma_error:
@@ -8151,10 +8217,46 @@ dma_error:
drop:
dev_kfree_skb_any(skb);
drop_nofree:
- tp->tx_dropped++;
+ tnapi->tx_dropped++;
return NETDEV_TX_OK;
}
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ u16 skb_queue_mapping;
+ netdev_tx_t ret;
+
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, skb_queue_mapping);
+
+ ret = __tg3_start_xmit(skb, dev);
+
+ /* Notify the hardware that packets are ready by updating the TX ring
+ * tail pointer. We respect netdev_xmit_more() thus avoiding poking
+ * the hardware for every packet. To guarantee forward progress the TX
+ * ring must be drained when it is full as indicated by
+ * netif_xmit_stopped(). This needs to happen even when the current
+ * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
+ * queued by previous __tg3_start_xmit() calls might get stuck in
+ * the queue forever.
+ */
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+ struct tg3_napi *tnapi;
+ struct tg3 *tp;
+
+ tp = netdev_priv(dev);
+ tnapi = &tp->napi[skb_queue_mapping];
+
+ if (tg3_flag(tp, ENABLE_TSS))
+ tnapi++;
+
+ tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+ }
+
+ return ret;
+}
+
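/* Standalone simulation of the doorbell batching described above (a sketch,
 * not driver code): descriptors queue up while the stack signals more
 * packets are coming, and the producer index is written to the hardware
 * mailbox only on the last packet of a burst or when the queue stalls.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int tx_prod, doorbell_writes;

static void xmit_one(bool xmit_more, bool queue_stopped)
{
	tx_prod++;				/* queue one descriptor */
	if (!xmit_more || queue_stopped)
		doorbell_writes++;		/* tw32_tx_mbox() in tg3 */
}

int main(void)
{
	/* a 4-packet burst: only the final packet rings the doorbell */
	xmit_one(true, false);
	xmit_one(true, false);
	xmit_one(true, false);
	xmit_one(false, false);
	printf("%u descriptors, %u doorbell writes\n",
	       tx_prod, doorbell_writes);	/* 4 descriptors, 1 write */
	return 0;
}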
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
if (enable) {
@@ -9330,7 +9432,7 @@ static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
- int err;
+ int err, i;
tg3_stop_fw(tp);
@@ -9351,6 +9453,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
/* And make sure the next sample is new data */
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+ for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ tnapi->rx_dropped = 0;
+ tnapi->tx_dropped = 0;
+ }
}
return err;
@@ -9697,26 +9806,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
@@ -10052,7 +10142,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
/* Pseudo-header checksum is done by hardware logic and not
- * the offload processers, so make the chip do the pseudo-
+ * the offload processors, so make the chip do the pseudo-
* header checksums on receive. For transmit it is more
* convenient to do the pseudo-header checksum in software
* as Linux does that on transmit for us in all cases.
@@ -10969,7 +11059,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = from_timer(tp, t, timer);
+ struct tg3 *tp = timer_container_of(tp, t, timer);
spin_lock(&tp->lock);
@@ -11140,7 +11230,7 @@ static void tg3_timer_start(struct tg3 *tp)
static void tg3_timer_stop(struct tg3 *tp)
{
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
@@ -11149,6 +11239,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11161,7 +11253,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11175,7 +11269,8 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev) ||
+ tp->pdev->error_state != pci_channel_io_normal) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@@ -11188,6 +11283,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11207,12 +11303,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
@@ -11231,18 +11329,17 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
else {
name = &tnapi->irq_lbl[0];
if (tnapi->tx_buffers && tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-txrx-%d", tp->dev->name, irq_num);
else if (tnapi->tx_buffers)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-tx-%d", tp->dev->name, irq_num);
else if (tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-rx-%d", tp->dev->name, irq_num);
else
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-%d", tp->dev->name, irq_num);
- name[IFNAMSIZ-1] = 0;
}
if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
@@ -11573,9 +11670,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -11900,6 +11999,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ int i;
stats->rx_packets = old_stats->rx_packets +
get_stat64(&hw_stats->rx_ucast_packets) +
@@ -11946,8 +12048,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
- stats->rx_dropped = tp->rx_dropped;
- stats->tx_dropped = tp->tx_dropped;
+ /* Aggregate per-queue counters. The per-queue counters are updated
+ * by a single writer, race-free. The result computed by this loop
+ * might not be 100% accurate (counters can be updated in the middle of
+ * the loop), but the next tg3_get_nstats() will recompute the current
+ * value, so this is acceptable.
+ *
+ * Note that these counters wrap around at 4G on 32-bit machines.
+ */
+ rx_dropped = (unsigned long)(old_stats->rx_dropped);
+ tx_dropped = (unsigned long)(old_stats->tx_dropped);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ rx_dropped += tnapi->rx_dropped;
+ tx_dropped += tnapi->tx_dropped;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
@@ -12303,9 +12423,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -12391,7 +12511,10 @@ static int tg3_nway_reset(struct net_device *dev)
return r;
}
-static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void tg3_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12412,7 +12535,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_pending = tp->napi[0].tx_pending;
}
-static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int tg3_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
@@ -12432,6 +12558,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12460,6 +12587,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12541,6 +12669,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12570,6 +12699,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -12589,29 +12719,17 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
-static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 tg3_get_rx_ring_count(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
if (!tg3_flag(tp, SUPPORT_MSIX))
- return -EOPNOTSUPP;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- if (netif_running(tp->dev))
- info->data = tp->rxq_cnt;
- else {
- info->data = num_online_cpus();
- if (info->data > TG3_RSS_MAX_NUM_QS)
- info->data = TG3_RSS_MAX_NUM_QS;
- }
+ return 1;
- return 0;
+ if (netif_running(tp->dev))
+ return tp->rxq_cnt;
- default:
- return -EOPNOTSUPP;
- }
+ return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max);
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
@@ -12625,24 +12743,23 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
struct tg3 *tp = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- indir[i] = tp->rss_ind_tbl[i];
+ rxfh->indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -12650,15 +12767,16 @@ static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] = indir[i];
+ tp->rss_ind_tbl[i] = rxfh->indir[i];
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;
@@ -12988,12 +13106,16 @@ static int tg3_test_nvram(struct tg3 *tp)
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if (csum != le32_to_cpu(buf[0x10/4]))
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
- csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != le32_to_cpu(buf[0xfc/4]))
+ csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
goto out;
kfree(buf);
@@ -13770,6 +13892,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13781,6 +13904,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -13790,25 +13914,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.flags)
- return -EINVAL;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tp->rxptpctl = 0;
break;
@@ -13868,74 +13987,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
tg3_flag_set(tp, TX_TSTAMP_EN);
else
tg3_flag_clear(tp, TX_TSTAMP_EN);
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ stmpconf->flags = 0;
+ stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (tp->rxptpctl) {
case 0:
- stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -13990,12 +14107,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
- case SIOCSHWTSTAMP:
- return tg3_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return tg3_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14063,7 +14174,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14072,7 +14183,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14085,7 +14196,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14100,7 +14213,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14143,7 +14256,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
- .get_rxnfc = tg3_get_rxnfc,
+ .get_rx_ring_count = tg3_get_rx_ring_count,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
.get_rxfh = tg3_get_rxfh,
.set_rxfh = tg3_set_rxfh,
@@ -14187,7 +14300,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
@@ -14225,6 +14338,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14244,6 +14358,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -14267,6 +14382,8 @@ static const struct net_device_ops tg3_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
+ .ndo_hwtstamp_get = tg3_hwtstamp_get,
+ .ndo_hwtstamp_set = tg3_hwtstamp_set,
};
static void tg3_get_eeprom_size(struct tg3 *tp)
@@ -15538,10 +15655,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
@@ -16467,7 +16587,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCIX_TARGET_HWBUG);
- /* The chip can have it's power management PCI config
+ /* The chip can have its power management PCI config
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
@@ -16958,12 +17078,14 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
+ __be32 be_hi, be_lo;
+
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
- !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
- !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&addr[0], ((char *)&hi) + 2, 2);
- memcpy(&addr[2], (char *)&lo, sizeof(lo));
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
+ memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
+ memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
@@ -17007,7 +17129,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
!tg3_flag(tp, PCI_EXPRESS))
goto out;
-#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
@@ -17682,7 +17804,7 @@ static int tg3_init_one(struct pci_dev *pdev,
* device behind the EPB cannot support DMA addresses > 40-bit.
* On 64-bit systems with IOMMU, use 40-bit dma_mask.
* On 64-bit systems without IOMMU, use 64-bit dma_mask and
- * do DMA address check in tg3_start_xmit().
+ * do DMA address check in __tg3_start_xmit().
*/
if (tg3_flag(tp, IS_5788))
persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
@@ -17694,6 +17816,9 @@ static int tg3_init_one(struct pci_dev *pdev,
} else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ persist_dma_mask = DMA_BIT_MASK(31);
+
/* Configure DMA attributes. */
if (dma_mask > DMA_BIT_MASK(32)) {
err = dma_set_mask(&pdev->dev, dma_mask);
@@ -17796,10 +17921,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
tnapi->int_mbox = intmbx;
- if (i <= 4)
- intmbx += 0x8;
- else
- intmbx += 0x4;
+ intmbx += 0x8;
tnapi->consmbox = rcvmbx;
tnapi->prodmbox = sndmbx;
@@ -17976,7 +18098,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
rtnl_lock();
@@ -18000,32 +18121,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);
unlock:
rtnl_unlock();
- return err;
+ return 0;
}
static int tg3_resume(struct device *device)
@@ -18041,6 +18141,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18057,6 +18158,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18069,12 +18171,59 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where an ACPI _PTS (Prepare To Sleep) call for S5 results in a
+ * fatal PCIe AER event on the tg3 device if the device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
@@ -18082,8 +18231,23 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
@@ -18103,6 +18267,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@@ -18119,9 +18286,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
@@ -18132,7 +18296,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18150,7 +18316,7 @@ done:
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -18171,7 +18337,6 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
@@ -18186,7 +18351,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18212,12 +18379,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18229,6 +18398,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..a9e7f88fa26d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2390,7 +2390,7 @@
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
-/* Fast Ethernet Tranceiver definitions */
+/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010
#define MII_TG3_FET_PTEST_TRIM_2 0x0002
@@ -3018,6 +3018,7 @@ struct tg3_napi {
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
+ unsigned long rx_dropped;
u32 tx_prod ____cacheline_aligned;
u32 tx_cons;
@@ -3026,12 +3027,13 @@ struct tg3_napi {
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct tg3_tx_ring_info *tx_buffers;
+ unsigned long tx_dropped;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
dma_addr_t tx_desc_mapping;
- char irq_lbl[IFNAMSIZ];
+ char irq_lbl[IFNAMSIZ + 6 + 10]; /* name + "-txrx-" + %d */
unsigned int irq_vec;
};
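/* Quick worst-case sizing check for the irq_lbl change above: a full
 * IFNAMSIZ - 1 character name plus the 6-character "-txrx-" infix plus up
 * to 10 digits of a 32-bit int fits the new buffer, and snprintf() always
 * NUL-terminates, so the manual name[IFNAMSIZ - 1] = 0 can go away.
 */
#include <net/if.h>	/* IFNAMSIZ (16 on Linux) */
#include <stdio.h>

int main(void)
{
	char lbl[IFNAMSIZ + 6 + 10];
	int n = snprintf(lbl, sizeof(lbl), "%s-txrx-%d",
			 "enp101s0f0np0ab", 2147483647);

	printf("wrote %d of %zu bytes: %s\n", n, sizeof(lbl) - 1, lbl);
	return 0;
}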
@@ -3190,6 +3192,7 @@ struct tg3 {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
s64 ptp_adjust;
+ u8 ptp_txts_retrycnt;
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
@@ -3219,8 +3222,6 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
- unsigned long rx_dropped;
- unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats_prev;
@@ -3372,6 +3373,8 @@ struct tg3 {
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
struct work_struct reset_task;
+ struct sk_buff *tx_tstamp_skb;
+ u64 pre_tx_ts;
int nvram_lock_cnt;
u32 nvram_size;
@@ -3416,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 8f0ac7b99973..858c92129451 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -18,15 +18,43 @@
/* BFA state machine interfaces */
-typedef void (*bfa_sm_t)(void *sm, int event);
-
/* For converting from state machine function to state encoding. */
-struct bfa_sm_table {
- bfa_sm_t sm; /*!< state machine function */
- int state; /*!< state machine encoding */
- char *name; /*!< state name for display */
-};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM_TABLE(n, s, e, t) \
+struct s; \
+enum e; \
+typedef void (*t)(struct s *, enum e); \
+ \
+struct n ## _sm_table_s { \
+ t sm; /* state machine function */ \
+ int state; /* state machine encoding */ \
+ char *name; /* state name for display */ \
+}; \
+ \
+static inline int \
+n ## _sm_to_state(struct n ## _sm_table_s *smt, t sm) \
+{ \
+ int i = 0; \
+ \
+ while (smt[i].sm && smt[i].sm != sm) \
+ i++; \
+ return smt[i].state; \
+}
+
+BFA_SM_TABLE(iocpf, bfa_iocpf, iocpf_event, bfa_fsm_iocpf_t)
+BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t)
+BFA_SM_TABLE(cmdq, bfa_msgq_cmdq, cmdq_event, bfa_fsm_msgq_cmdq_t)
+BFA_SM_TABLE(rspq, bfa_msgq_rspq, rspq_event, bfa_fsm_msgq_rspq_t)
+
+BFA_SM_TABLE(ioceth, bna_ioceth, bna_ioceth_event, bna_fsm_ioceth_t)
+BFA_SM_TABLE(enet, bna_enet, bna_enet_event, bna_fsm_enet_t)
+BFA_SM_TABLE(ethport, bna_ethport, bna_ethport_event, bna_fsm_ethport_t)
+BFA_SM_TABLE(tx, bna_tx, bna_tx_event, bna_fsm_tx_t)
+BFA_SM_TABLE(rxf, bna_rxf, bna_rxf_event, bna_fsm_rxf_t)
+BFA_SM_TABLE(rx, bna_rx, bna_rx_event, bna_fsm_rx_t)
+
+#undef BFA_SM_TABLE
+
+#define BFA_SM(_sm) (_sm)
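/* For reference, one instantiation of the generator above,
 * BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t), expands to roughly
 * the following (a sketch of the generated code):
 */
struct bfa_ioc;
enum ioc_event;
typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc *, enum ioc_event);

struct ioc_sm_table_s {
	bfa_fsm_ioc_t sm;	/* state machine function */
	int state;		/* state machine encoding */
	char *name;		/* state name for display */
};

static inline int
ioc_sm_to_state(struct ioc_sm_table_s *smt, bfa_fsm_ioc_t sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}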
/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
@@ -41,24 +69,12 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ (_fsm)->fsm = (_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_cmp_state(_fsm, _state) \
- ((_fsm)->fsm == (bfa_fsm_t)(_state))
-
-static inline int
-bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
-{
- int i = 0;
-
- while (smt[i].sm && smt[i].sm != sm)
- i++;
- return smt[i].state;
-}
-
+#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state))
/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index cd933817a0b8..92c7639d1fc7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -114,7 +114,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
-static struct bfa_sm_table ioc_sm_table[] = {
+static struct ioc_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
@@ -183,7 +183,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
-static struct bfa_sm_table iocpf_sm_table[] = {
+static struct iocpf_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
@@ -314,13 +314,13 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
case IOC_E_FWRSP_GETATTR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
fallthrough;
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -330,7 +330,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_DISABLE:
- del_timer(&ioc->ioc_timer);
+ timer_delete(&ioc->ioc_timer);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -659,13 +659,13 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_ioc_pf_disabled(ioc);
break;
case IOCPF_E_STOP:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
@@ -741,7 +741,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_ioc_sync_leave(ioc);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
@@ -774,13 +774,13 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_ENABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
case IOCPF_E_INITFAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -791,7 +791,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
case IOCPF_E_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_nw_ioc_hw_sem_release(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
@@ -844,12 +844,12 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
case IOCPF_E_FAIL:
- del_timer(&ioc->iocpf_timer);
+ timer_delete(&ioc->iocpf_timer);
fallthrough;
case IOCPF_E_TIMEOUT:
@@ -1210,7 +1210,7 @@ bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
- del_timer(&ioc->sem_timer);
+ timer_delete(&ioc->sem_timer);
}
/* Initialize LPU local memory (aka secondary memory / SRAM) */
@@ -1982,7 +1982,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
- del_timer(&ioc->hb_timer);
+ timer_delete(&ioc->hb_timer);
}
/* Initiate a full firmware download. */
@@ -2839,7 +2839,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strscpy_pad(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
@@ -2860,12 +2860,12 @@ static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
@@ -2983,7 +2983,7 @@ bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
if (iocpf_st == BFA_IOCPF_HWINIT)
bfa_ioc_poll_fwinit(ioc);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index edd0ed5b5332..f30d06ec4ffe 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -147,16 +147,20 @@ struct bfa_ioc_notify {
(__notify)->cbarg = (__cbarg); \
} while (0)
+enum iocpf_event;
+
struct bfa_iocpf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_iocpf *s, enum iocpf_event e);
struct bfa_ioc *ioc;
bool fw_mismatch_notified;
bool auto_recover;
u32 poll_time;
};
+enum ioc_event;
+
struct bfa_ioc {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_ioc *s, enum ioc_event e);
struct bfa *bfa;
struct bfa_pcidev pcidev;
struct timer_list ioc_timer;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index 47125f419530..fa40d5ec6f1c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -202,7 +202,6 @@ static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
size_t len = cmd->msg_size;
- int num_entries = 0;
size_t to_copy;
u8 *src, *dst;
@@ -219,7 +218,6 @@ __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
dst = (u8 *)cmdq->addr.kva;
dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
- num_entries++;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
index 75343b535798..170a4b4bed96 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -55,8 +55,10 @@ enum bfa_msgq_cmdq_flags {
BFA_MSGQ_CMDQ_F_DB_UPDATE = 1,
};
+enum cmdq_event;
+
struct bfa_msgq_cmdq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_cmdq *s, enum cmdq_event e);
enum bfa_msgq_cmdq_flags flags;
u16 producer_index;
@@ -81,8 +83,10 @@ enum bfa_msgq_rspq_flags {
typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
+enum rspq_event;
+
struct bfa_msgq_rspq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_rspq *s, enum rspq_event e);
enum bfa_msgq_rspq_flags flags;
u16 producer_index;
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index a2c983f56b00..883de0ac8de4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1257,7 +1257,7 @@ bna_enet_mtu_get(struct bna_enet *enet)
void
bna_enet_enable(struct bna_enet *enet)
{
- if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
+ if (enet->fsm != bna_enet_sm_stopped)
return;
enet->flags |= BNA_ENET_F_ENABLED;
@@ -1751,12 +1751,12 @@ bna_ioceth_uninit(struct bna_ioceth *ioceth)
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
+ if (ioceth->fsm == bna_ioceth_sm_ready) {
bnad_cb_ioceth_ready(ioceth->bna->bnad);
return;
}
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
+ if (ioceth->fsm == bna_ioceth_sm_stopped)
bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 2623a0da4682..c05dc7a1c4a1 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1956,7 +1956,7 @@ static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
- if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ if (rx->fsm == bna_rx_sm_stopped)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
else {
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
@@ -2535,7 +2535,7 @@ bna_rx_destroy(struct bna_rx *rx)
void
bna_rx_enable(struct bna_rx *rx)
{
- if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ if (rx->fsm != bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLED;
@@ -3523,7 +3523,7 @@ bna_tx_destroy(struct bna_tx *tx)
void
bna_tx_enable(struct bna_tx *tx)
{
- if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ if (tx->fsm != bna_tx_sm_stopped)
return;
tx->flags |= BNA_TX_F_ENABLED;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 666b6922e24d..986f43d27711 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -312,8 +312,10 @@ struct bna_attr {
/* IOCEth */
+enum bna_ioceth_event;
+
struct bna_ioceth {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ioceth *s, enum bna_ioceth_event e);
struct bfa_ioc ioc;
struct bna_attr attr;
@@ -334,8 +336,10 @@ struct bna_pause_config {
enum bna_status rx_pause;
};
+enum bna_enet_event;
+
struct bna_enet {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_enet *s, enum bna_enet_event e);
enum bna_enet_flags flags;
enum bna_enet_type type;
@@ -360,8 +364,10 @@ struct bna_enet {
/* Ethport */
+enum bna_ethport_event;
+
struct bna_ethport {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ethport *s, enum bna_ethport_event e);
enum bna_ethport_flags flags;
enum bna_link_status link_status;
@@ -410,7 +416,7 @@ struct bna_ib {
/* Tx object */
/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE 16
+#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6)
struct bna_tcb {
/* Fast path */
void **sw_qpt;
@@ -454,13 +460,16 @@ struct bna_txq {
};
/* Tx object */
+
+enum bna_tx_event;
+
struct bna_tx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_tx *s, enum bna_tx_event e);
enum bna_tx_flags flags;
enum bna_tx_type type;
@@ -698,8 +707,11 @@ struct bna_rxp {
};
/* RxF structure (hardware Rx Function) */
+
+enum bna_rxf_event;
+
struct bna_rxf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rxf *s, enum bna_rxf_event e);
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -769,13 +781,16 @@ struct bna_rxf {
};
/* Rx object */
+
+enum bna_rx_event;
+
struct bna_rx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rx *s, enum bna_rx_event e);
enum bna_rx_type type;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index bbdc829c3524..9bed33295839 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
@@ -1037,8 +1038,7 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info =
- (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1056,7 +1056,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1092,10 +1092,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1133,7 +1133,7 @@ bnad_tx_cleanup(struct delayed_work *work)
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
int i;
@@ -1149,7 +1149,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1171,7 +1171,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1208,7 +1208,7 @@ bnad_rx_cleanup(void *work)
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1231,7 +1231,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bna_rcb *rcb;
struct bnad_rx_ctrl *rx_ctrl;
@@ -1535,8 +1535,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
for (i = 0; i < num_txqs; i++) {
vector_num = tx_info->tcb[i]->intr_vector;
- sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
- tx_id + tx_info->tcb[i]->id);
+ snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
+ bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_tx, 0,
tx_info->tcb[i]->name,
@@ -1586,9 +1587,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
for (i = 0; i < num_rxps; i++) {
vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
- sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
- bnad->netdev->name,
- rx_id + rx_info->rx_ctrl[i].ccb->id);
+ snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
+ "%s CQ %d", bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_rx, 0,
rx_info->rx_ctrl[i].ccb->name,
@@ -1687,7 +1688,8 @@ err_return:
static void
bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1698,7 +1700,8 @@ bnad_ioc_timeout(struct timer_list *t)
static void
bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1709,7 +1712,8 @@ bnad_ioc_hb_check(struct timer_list *t)
static void
bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1720,7 +1724,8 @@ bnad_iocpf_timeout(struct timer_list *t)
static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1734,7 +1739,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
- * 2 del_timer_sync
+ * 2 timer_delete_sync
* 3 mod_timer
*/
@@ -1742,7 +1747,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
static void
bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, dim_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1775,7 +1780,7 @@ bnad_dim_timeout(struct timer_list *t)
static void
bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, stats_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
@@ -1836,7 +1841,7 @@ bnad_stats_timer_stop(struct bnad *bnad)
to_del = 1;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->stats_timer);
+ timer_delete_sync(&bnad->stats_timer);
}
/* Utilities */
@@ -1881,7 +1886,6 @@ poll_exit:
return rcvd;
}
-#define BNAD_NAPI_POLL_QUOTA 64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
@@ -1892,7 +1896,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ bnad_napi_poll_rx);
}
}
@@ -1993,8 +1997,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
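Dropping the (work_func_t) cast is more than cosmetic: an indirect call through a mismatched prototype trips kernels built with control-flow integrity. The handler must take the canonical struct work_struct argument and recover its context itself; a hedged sketch with made-up names:

struct demo_tx_info {				/* illustrative container */
	struct delayed_work tx_cleanup_work;
};

static void demo_tx_cleanup(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_tx_info *tx_info =
		container_of(dwork, struct demo_tx_info, tx_cleanup_work);

	/* ... reclaim completed TX resources for tx_info ... */
}

/* at setup time: */
INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, demo_tx_cleanup);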
@@ -2161,7 +2164,7 @@ bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
}
init_completion(&bnad->bnad_completions.rx_comp);
@@ -2250,8 +2253,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
@@ -2824,8 +2826,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2873,8 +2874,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
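skb_tcp_all_headers() is a small helper in <linux/tcp.h> that names the repeated two-call expression used in both hunks; it is defined as:

static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	/* bytes from the start of the packet through the TCP header */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}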
@@ -3282,7 +3282,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
frame = BNAD_FRAME_SIZE(mtu);
new_frame = BNAD_FRAME_SIZE(new_mtu);
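netdev->mtu is read locklessly on fast paths, so the store is annotated to pair with READ_ONCE() on the reader side and rule out load/store tearing. The paired idiom, sketched:

/* writer (here: under conf_mutex) */
WRITE_ONCE(netdev->mtu, new_mtu);

/* lockless reader elsewhere in the stack */
unsigned int mtu = READ_ONCE(netdev->mtu);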
@@ -3421,7 +3421,7 @@ static const struct net_device_ops bnad_netdev_ops = {
};
static void
-bnad_netdev_init(struct bnad *bnad, bool using_dac)
+bnad_netdev_init(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
@@ -3434,10 +3434,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
@@ -3544,8 +3542,7 @@ bnad_lock_uninit(struct bnad *bnad)
/* PCI Initialization */
static int
-bnad_pci_init(struct bnad *bnad,
- struct pci_dev *pdev, bool *using_dac)
+bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
{
int err;
@@ -3555,14 +3552,9 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = true;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- *using_dac = false;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_regions;
pci_set_master(pdev);
return 0;
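The removed 64-then-32-bit fallback was dead code: for a PCI device, dma_set_mask_and_coherent() with a 64-bit mask fails only when the device cannot do DMA at all, and in that case a 32-bit retry cannot succeed either. One call and one error path suffice, annotated:

err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
	goto release_regions;	/* no DMA capability; smaller masks cannot help */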
@@ -3585,7 +3577,6 @@ static int
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3615,13 +3606,8 @@ bnad_pci_probe(struct pci_dev *pdev,
bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
- /*
- * PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
- * = 0 for 32 bit DMA
- */
- using_dac = false;
- err = bnad_pci_init(bnad, pdev, &using_dac);
+ /* PCI initialization */
+ err = bnad_pci_init(bnad, pdev);
if (err)
goto unlock_mutex;
@@ -3634,7 +3620,7 @@ bnad_pci_probe(struct pci_dev *pdev,
goto pci_uninit;
/* Initialize netdev structure, set up ethtool ops */
- bnad_netdev_init(bnad, using_dac);
+ bnad_netdev_init(bnad);
/* Set link to down state */
netif_carrier_off(netdev);
@@ -3744,9 +3730,9 @@ probe_uninit:
bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3787,9 +3773,9 @@ bnad_pci_remove(struct pci_dev *pdev)
mutex_lock(&bnad->conf_mutex);
bnad_ioceth_disable(bnad);
- del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
- del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer);
+ timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_uninit(bna);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 627a93ce38ab..4396997c59d0 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -19,7 +19,6 @@
#include <linux/firmware.h>
#include <linux/if_vlan.h>
-/* Fix for IA64 */
#include <asm/checksum.h>
#include <net/ip6_checksum.h>
@@ -352,7 +351,6 @@ struct bnad {
/* debugfs specific data */
char *regdata;
u32 reglen;
- struct dentry *bnad_dentry_files[5];
struct dentry *port_debugfs_root;
};
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 04ad0f2b9677..8f0972e6737c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
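memdup_user_nul() matters here because both write handlers go on to parse the copied buffer with string functions; memdup_user() handed them an unterminated blob. The idiom, sketched (the parse format is illustrative):

u32 addr, len;
char *kbuf = memdup_user_nul(buf, nbytes);	/* kmalloc(nbytes + 1) plus '\0' */

if (IS_ERR(kbuf))
	return PTR_ERR(kbuf);

/* safe: kbuf is now a proper NUL-terminated C string */
if (sscanf(kbuf, "%x:%x", &addr, &len) != 2) {
	kfree(kbuf);
	return -EINVAL;
}
kfree(kbuf);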
@@ -500,11 +500,6 @@ bnad_debugfs_init(struct bnad *bnad)
if (!bna_debugfs_root) {
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
- if (!bna_debugfs_root) {
- netdev_warn(bnad->netdev,
- "debugfs root dir creation failed\n");
- return;
- }
}
/* Setup the pci_dev debugfs directory for the port */
@@ -512,28 +507,16 @@ bnad_debugfs_init(struct bnad *bnad)
if (!bnad->port_debugfs_root) {
bnad->port_debugfs_root =
debugfs_create_dir(name, bna_debugfs_root);
- if (!bnad->port_debugfs_root) {
- netdev_warn(bnad->netdev,
- "debugfs root dir creation failed\n");
- return;
- }
atomic_inc(&bna_debugfs_port_count);
for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
file = &bnad_debugfs_files[i];
- bnad->bnad_dentry_files[i] =
- debugfs_create_file(file->name,
- file->mode,
- bnad->port_debugfs_root,
- bnad,
- file->fops);
- if (!bnad->bnad_dentry_files[i]) {
- netdev_warn(bnad->netdev,
- "create %s entry failed\n",
- file->name);
- return;
- }
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
}
}
}
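The deleted checks follow the debugfs design rule: on failure the creation functions return an ERR_PTR, and every debugfs API accepts an ERR_PTR (or NULL) parent and quietly does nothing, so callers neither check nor bail out. Sketch (priv and demo_fops are placeholders):

struct dentry *dir = debugfs_create_dir("bna", NULL);

/* even if dir is an ERR_PTR, this is a silent no-op rather than a crash */
debugfs_create_file("drvinfo", 0444, dir, priv, &demo_fops);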
@@ -542,15 +525,6 @@ bnad_debugfs_init(struct bnad *bnad)
void
bnad_debugfs_uninit(struct bnad *bnad)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
- if (bnad->bnad_dentry_files[i]) {
- debugfs_remove(bnad->bnad_dentry_files[i]);
- bnad->bnad_dentry_files[i] = NULL;
- }
- }
-
/* Remove the pci_dev debugfs directory for the port */
if (bnad->port_debugfs_root) {
debugfs_remove(bnad->port_debugfs_root);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 391b85f25141..216e25f26dbb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -235,13 +235,18 @@ static int
bnad_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
@@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
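The legacy SUPPORTED_*/ADVERTISED_* words are 32-bit masks and simply cannot express link modes whose bit index is 32 or higher, which includes the 10000baseCR/SR/LR_Full modes this hunk starts reporting. The ksettings helpers manipulate a bitmap of arbitrary width instead:

/* works for any ETHTOOL_LINK_MODE_* bit, unlike the legacy u32 words */
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);

bool fibre = ethtool_link_ksettings_test_link_mode(cmd, supported, FIBRE);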
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
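strscpy() is the designated replacement for strlcpy(): it always NUL-terminates, it never walks the whole source just to compute a return value (strlcpy() does, which can over-read an unterminated source), and it returns the copied length or -E2BIG, making truncation trivial to detect:

const char *src = "some-adapter-name";	/* illustrative */
char dst[8];
ssize_t n = strscpy(dst, src, sizeof(dst));

if (n == -E2BIG)
	pr_debug("name truncated\n");	/* dst is still valid and NUL-terminated */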
@@ -373,7 +373,7 @@ static int bnad_set_coalesce(struct net_device *netdev,
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
- del_timer_sync(&bnad->dim_timer);
+ timer_delete_sync(&bnad->dim_timer);
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_rx_coalescing_timeo_set(bnad);
}
@@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev,
static void
bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev,
static int
bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
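These two callbacks grew extra parameters ethtool-core-wide: a struct kernel_ethtool_ringparam for ring fields that never existed in the legacy UAPI struct (rx_buf_len, cqe_size, tcp_data_split and friends) and a netlink extack for precise error reporting. A driver needing neither just ignores them; a sketch of the updated shape, with an illustrative limit:

static int demo_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *extack)
{
	if (ring->rx_pending < 64) {
		NL_SET_ERR_MSG_MOD(extack, "RX ring too small");
		return -EINVAL;
	}
	return 0;
}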
@@ -604,7 +608,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
- ethtool_sprintf(&string, bnad_net_stats_strings[i]);
+ ethtool_puts(&string, bnad_net_stats_strings[i]);
}
bmap = bna_tx_rid_mask(&bnad->bna);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5620b97b3482..87414a2ddf6e 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,10 +12,8 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
-
-#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
-#define MACB_EXT_DESC
-#endif
+#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -81,6 +79,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_PBUFRXCUT 0x0044 /* RX Partial Store and Forward */
#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */
#define GEM_HRB 0x0080 /* Hash Bottom */
@@ -94,6 +93,8 @@
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -179,6 +180,13 @@
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_ENST_START_TIME_Q0 0x0800 /* ENST Q0 start time */
+#define GEM_ENST_START_TIME_Q1 0x0804 /* ENST Q1 start time */
+#define GEM_ENST_ON_TIME_Q0 0x0820 /* ENST Q0 on time */
+#define GEM_ENST_ON_TIME_Q1 0x0824 /* ENST Q1 on time */
+#define GEM_ENST_OFF_TIME_Q0 0x0840 /* ENST Q0 off time */
+#define GEM_ENST_OFF_TIME_Q1 0x0844 /* ENST Q1 off time */
+#define GEM_ENST_CONTROL 0x0880 /* ENST control register */
#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
@@ -208,14 +216,19 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_ENST_START_TIME(hw_q) (0x0800 + ((hw_q) << 2))
+#define GEM_ENST_ON_TIME(hw_q) (0x0820 + ((hw_q) << 2))
+#define GEM_ENST_OFF_TIME(hw_q) (0x0840 + ((hw_q) << 2))
+
+/* Bitfields in ENST_CONTROL */
+#define GEM_ENST_DISABLE_QUEUE_OFFSET 16
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
@@ -244,6 +257,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_PTPUNI_OFFSET 20 /* PTP Unicast packet enable */
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
@@ -342,6 +357,10 @@
#define GEM_ADDR64_SIZE 1
+/* Bitfields in PBUFRXCUT */
+#define GEM_ENCUTTHRU_OFFSET 31 /* Enable RX partial store and forward */
+#define GEM_ENCUTTHRU_SIZE 1
+
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
#define MACB_NSR_LINK_SIZE 1
@@ -508,6 +527,8 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
+#define GEM_RX_PBUF_ADDR_OFFSET 22
+#define GEM_RX_PBUF_ADDR_SIZE 4
/* Bitfields in DCFG5. */
#define GEM_TSU_OFFSET 8
@@ -516,6 +537,10 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+#define GEM_PBUF_RSC_OFFSET 26
+#define GEM_PBUF_RSC_SIZE 1
+#define GEM_PBUF_CUTTHRU_OFFSET 25
+#define GEM_PBUF_CUTTHRU_SIZE 1
#define GEM_DAW64_OFFSET 23
#define GEM_DAW64_SIZE 1
@@ -539,6 +564,23 @@
#define GEM_HIGH_SPEED_OFFSET 26
#define GEM_HIGH_SPEED_SIZE 1
+/* Bitfields in ENST_START_TIME_Qx. */
+#define GEM_START_TIME_SEC_OFFSET 30
+#define GEM_START_TIME_SEC_SIZE 2
+#define GEM_START_TIME_NSEC_OFFSET 0
+#define GEM_START_TIME_NSEC_SIZE 30
+
+/* Bitfields in ENST_ON_TIME_Qx. */
+#define GEM_ON_TIME_OFFSET 0
+#define GEM_ON_TIME_SIZE 17
+
+/* Bitfields in ENST_OFF_TIME_Qx. */
+#define GEM_OFF_TIME_OFFSET 0
+#define GEM_OFF_TIME_SIZE 17
+
+/* Hardware ENST timing registers granularity */
+#define ENST_TIME_GRANULARITY_NS 8
+
/* Bitfields in USX_CONTROL. */
#define GEM_USX_CTRL_SPEED_OFFSET 14
#define GEM_USX_CTRL_SPEED_SIZE 3
@@ -631,6 +673,10 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */
+#define MACB_QUEUE_DISABLE_SIZE 1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -691,6 +737,8 @@
#define GEM_CLK_DIV48 3
#define GEM_CLK_DIV64 4
#define GEM_CLK_DIV96 5
+#define GEM_CLK_DIV128 6
+#define GEM_CLK_DIV224 7
/* Constants for MAN register */
#define MACB_MAN_C22_SOF 1
@@ -706,24 +754,31 @@
#define MACB_MAN_C45_CODE 2
/* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
-#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
-#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
-#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
-#define MACB_CAPS_USRIO_DISABLED 0x00000010
-#define MACB_CAPS_JUMBO 0x00000020
-#define MACB_CAPS_GEM_HAS_PTP 0x00000040
-#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
-#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
-#define MACB_CAPS_MIIONRGMII 0x00000200
-#define MACB_CAPS_CLK_HW_CHG 0x04000000
-#define MACB_CAPS_MACB_IS_EMAC 0x08000000
-#define MACB_CAPS_FIFO_MODE 0x10000000
-#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
-#define MACB_CAPS_SG_DISABLED 0x40000000
-#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0)
+#define MACB_CAPS_USRIO_HAS_CLKEN BIT(1)
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2)
+#define MACB_CAPS_NO_GIGABIT_HALF BIT(3)
+#define MACB_CAPS_USRIO_DISABLED BIT(4)
+#define MACB_CAPS_JUMBO BIT(5)
+#define MACB_CAPS_GEM_HAS_PTP BIT(6)
+#define MACB_CAPS_BD_RD_PREFETCH BIT(7)
+#define MACB_CAPS_NEEDS_RSTONUBR BIT(8)
+#define MACB_CAPS_MIIONRGMII BIT(9)
+#define MACB_CAPS_NEED_TSUCLK BIT(10)
+#define MACB_CAPS_QUEUE_DISABLE BIT(11)
+#define MACB_CAPS_QBV BIT(12)
+#define MACB_CAPS_PCS BIT(13)
+#define MACB_CAPS_HIGH_SPEED BIT(14)
+#define MACB_CAPS_CLK_HW_CHG BIT(15)
+#define MACB_CAPS_MACB_IS_EMAC BIT(16)
+#define MACB_CAPS_FIFO_MODE BIT(17)
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18)
+#define MACB_CAPS_SG_DISABLED BIT(19)
+#define MACB_CAPS_MACB_IS_GEM BIT(20)
+#define MACB_CAPS_DMA_64B BIT(21)
+#define MACB_CAPS_DMA_PTP BIT(22)
+#define MACB_CAPS_RSC BIT(23)
+#define MACB_CAPS_NO_LSO BIT(24)
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -766,8 +821,6 @@
#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
-#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
-
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -802,12 +855,6 @@ struct macb_dma_desc {
u32 ctrl;
};
-#ifdef MACB_EXT_DESC
-#define HW_DMA_CAP_32B 0
-#define HW_DMA_CAP_64B (1 << 0)
-#define HW_DMA_CAP_PTP (1 << 1)
-#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP)
-
struct macb_dma_desc_64 {
u32 addrh;
u32 resvd;
@@ -818,12 +865,6 @@ struct macb_dma_desc_ptp {
u32 ts_2;
};
-struct gem_tx_ts {
- struct sk_buff *skb;
- struct macb_dma_desc_ptp desc_ptp;
-};
-#endif
-
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
#define MACB_RX_USED_SIZE 1
@@ -935,75 +976,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1011,7 +1050,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
@@ -1153,11 +1192,12 @@ struct macb_ptp_info {
s32 (*get_ptp_max_adj)(void);
unsigned int (*get_tsu_rate)(struct macb *bp);
int (*get_ts_info)(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
};
struct macb_pm_data {
@@ -1180,6 +1220,7 @@ struct macb_config {
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk, struct clk **tsu_clk);
int (*init)(struct platform_device *pdev);
+ unsigned int max_tx_length;
int jumbo_max_len;
const struct macb_usrio_config *usrio;
};
@@ -1198,16 +1239,23 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
+ /* ENST register offsets for this queue */
+ unsigned int ENST_START_TIME;
+ unsigned int ENST_ON_TIME;
+ unsigned int ENST_OFF_TIME;
+
+ /* Lock to protect tx_head and tx_tail */
+ spinlock_t tx_ptr_lock;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
struct macb_tx_skb *tx_skb;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ bool txubr_pending;
+ struct napi_struct napi_tx;
dma_addr_t rx_ring_dma;
dma_addr_t rx_buffers_dma;
@@ -1216,14 +1264,8 @@ struct macb_queue {
struct macb_dma_desc *rx_ring;
struct sk_buff **rx_skbuff;
void *rx_buffers;
- struct napi_struct napi;
+ struct napi_struct napi_rx;
struct queue_stats stats;
-
-#ifdef CONFIG_MACB_USE_HWSTAMP
- struct work_struct tx_ts_task;
- unsigned int tx_ts_head, tx_ts_tail;
- struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
-#endif
};
struct ethtool_rx_fs_item {
@@ -1244,13 +1286,14 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
+ dma_addr_t rx_ring_tieoff_dma;
size_t rx_buffer_size;
unsigned int rx_ring_size;
unsigned int tx_ring_size;
unsigned int num_queues;
- unsigned int queue_mask;
struct macb_queue queues[MACB_MAX_QUEUES];
spinlock_t lock;
@@ -1261,6 +1304,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
@@ -1271,7 +1316,8 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs phylink_usx_pcs;
+ struct phylink_pcs phylink_sgmii_pcs;
u32 caps;
unsigned int dma_burst_length;
@@ -1288,24 +1334,28 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+ u32 wolopts;
+
+ /* RX watermark value for the PBUFRXCUT (RX partial store and forward) register */
+ u32 rx_watermark;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
-#ifdef MACB_EXT_DESC
- uint8_t hw_dma_cap;
-#endif
+
+ struct phy *phy;
+
spinlock_t tsu_clk_lock; /* gem tsu clock locking */
unsigned int tsu_rate;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
/* RX queue filer rule set*/
struct ethtool_rx_fs_list rx_fs_list;
spinlock_t rx_fs_lock;
unsigned int max_tuples;
- struct tasklet_struct hresp_err_tasklet;
+ struct work_struct hresp_err_bh_work;
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
@@ -1330,14 +1380,14 @@ enum macb_bd_control {
void gem_ptp_init(struct net_device *ndev);
void gem_ptp_remove(struct net_device *ndev);
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
{
- if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
- return -ENOTSUPP;
+ if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return;
- return gem_ptp_txstamp(queue, skb, desc);
+ gem_ptp_txstamp(bp, skb, desc);
}
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
@@ -1347,17 +1397,17 @@ static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, stru
gem_ptp_rxstamp(bp, skb, desc);
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config);
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
#else
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
-{
- return -1;
-}
-
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
#endif
@@ -1368,7 +1418,32 @@ static inline bool macb_is_gem(struct macb *bp)
static inline bool gem_has_ptp(struct macb *bp)
{
- return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
+}
+
+/* ENST Helper functions */
+static inline u64 enst_ns_to_hw_units(size_t ns, u32 speed_mbps)
+{
+ return DIV_ROUND_UP((ns) * (speed_mbps),
+ (ENST_TIME_GRANULARITY_NS * 1000));
+}
+
+static inline u64 enst_max_hw_interval(u32 speed_mbps)
+{
+ return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) *
+ ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps));
+}
+
+static inline bool macb_dma64(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ bp->caps & MACB_CAPS_DMA_64B;
+}
+
+static inline bool macb_dma_ptp(struct macb *bp)
+{
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) &&
+ bp->caps & MACB_CAPS_DMA_PTP;
}
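A worked example of the conversion helpers above (the numbers are mine, not the driver's): at 1 Gb/s one byte occupies 8 ns on the wire, so an 80 us "on" window becomes

/*	enst_ns_to_hw_units(80000, 1000)
 *		= DIV_ROUND_UP(80000 * 1000, 8 * 1000)
 *		= 10000 hardware units, i.e. 10000 byte times,
 * which is why the struct macb_queue_enst_config fields below carry
 * the _bytes suffix.
 */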
/**
@@ -1381,4 +1456,21 @@ struct macb_platform_data {
struct clk *hclk;
};
+/**
+ * struct macb_queue_enst_config - Configuration for Enhanced Scheduled Traffic
+ * @start_time_mask: Bitmask representing the start time for the queue
+ * @on_time_bytes: "on" interval, converted from nanoseconds to byte times
+ * @off_time_bytes: "off" interval, converted from nanoseconds to byte times
+ * @queue_id: Identifier for the queue
+ *
+ * This structure holds the configuration parameters for an ENST queue,
+ * used to control time-based transmission scheduling in the MACB driver.
+ */
+struct macb_queue_enst_config {
+ u32 start_time_mask;
+ u32 on_time_bytes;
+ u32 off_time_bytes;
+ u8 queue_id;
+};
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 309371abfe23..e461f5072884 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -6,35 +6,37 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/clk.h>
+#include <linux/circ_buf.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/circ_buf.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/inetdevice.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/tcp.h>
-#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -50,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -81,8 +79,7 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
+#define MACB_WOL_ENABLED BIT(0)
#define HS_SPEED_10000M 4
#define MACB_SERDES_RATE_10G 1
@@ -90,8 +87,7 @@ struct sifive_fu540_macb_mgmt {
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
-#define MACB_HALT_TIMEOUT 1230
-
+#define MACB_HALT_TIMEOUT 14000
#define MACB_PM_TIMEOUT 100 /* ms */
#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
@@ -125,56 +121,26 @@ struct sifive_fu540_macb_mgmt {
*/
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
-#ifdef MACB_EXT_DESC
- unsigned int desc_size;
+ unsigned int desc_size = sizeof(struct macb_dma_desc);
+
+ if (macb_dma64(bp))
+ desc_size += sizeof(struct macb_dma_desc_64);
+ if (macb_dma_ptp(bp))
+ desc_size += sizeof(struct macb_dma_desc_ptp);
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64);
- break;
- case HW_DMA_CAP_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_size = sizeof(struct macb_dma_desc)
- + sizeof(struct macb_dma_desc_64)
- + sizeof(struct macb_dma_desc_ptp);
- break;
- default:
- desc_size = sizeof(struct macb_dma_desc);
- }
return desc_size;
-#endif
- return sizeof(struct macb_dma_desc);
}
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
-#ifdef MACB_EXT_DESC
- switch (bp->hw_dma_cap) {
- case HW_DMA_CAP_64B:
- case HW_DMA_CAP_PTP:
- desc_idx <<= 1;
- break;
- case HW_DMA_CAP_64B_PTP:
- desc_idx *= 3;
- break;
- default:
- break;
- }
-#endif
- return desc_idx;
+ return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp));
}
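Both rewrites lean on the descriptor extensions being purely additive, and on the helper bools evaluating to 0 or 1. Spelled out for the fully featured case:

/* With 64-bit DMA and PTP both active, one logical ring slot is three
 * consecutive sub-descriptors:
 *
 *	size     = sizeof(struct macb_dma_desc)		(addr + ctrl)
 *		 + sizeof(struct macb_dma_desc_64)	(addrh + resvd)
 *		 + sizeof(struct macb_dma_desc_ptp)	(ts_1 + ts_2)
 *	hw index = logical index * (1 + 1 + 1)
 *
 * With neither capability, both collapse to the base descriptor and a
 * multiplier of 1.
 */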
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
return (struct macb_dma_desc_64 *)((void *)desc
+ sizeof(struct macb_dma_desc));
}
-#endif
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
@@ -279,11 +245,16 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
+ if (gem_has_ptp(bp)) {
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+ }
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
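dev_addr is a plain byte array with no alignment guarantee, so the old cast-and-dereference was an unaligned word access (a trap on strict-alignment CPUs) and sparse-unclean besides, since cpu_to_le32() returns a __le32, not a u32. The get_unaligned_le*() helpers perform byte-wise little-endian loads:

u32 bottom = get_unaligned_le32(bp->dev->dev_addr);	/* bytes 0-3 */
u16 top    = get_unaligned_le16(bp->dev->dev_addr + 4);	/* bytes 4-5 */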
@@ -330,7 +301,39 @@ static int macb_mdio_wait_for_idle(struct macb *bp)
1, MACB_MDIO_TIMEOUT);
}
-static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+mdio_read_exit:
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
+ int regnum)
{
struct macb *bp = bus->priv;
int status;
@@ -345,30 +348,22 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_read_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)));
- }
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
@@ -377,14 +372,45 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
}
-static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+mdio_write_exit:
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum,
+ u16 value)
{
struct macb *bp = bus->priv;
int status;
@@ -399,39 +425,29 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_write_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)
- | MACB_BF(DATA, value)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)
- | MACB_BF(DATA, value)));
- }
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
goto mdio_write_exit;
mdio_write_exit:
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
return status;
@@ -442,19 +458,17 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
+ /* Single register for all queues' high 32 bits. */
+ if (macb_dma64(bp)) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
-#endif
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -474,19 +488,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -506,79 +510,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
-
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_10GBASER &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- linkmode_zero(supported);
- return;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
- !(bp->caps & MACB_CAPS_HIGH_SPEED &&
- bp->caps & MACB_CAPS_PCS)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- goto out;
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
-out:
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed,
int duplex)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 config;
config = gem_readl(bp, USX_CONTROL);
@@ -590,9 +526,10 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 val;
state->speed = SPEED_10000;
@@ -607,12 +544,12 @@ static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
}
static int macb_usx_pcs_config(struct phylink_pcs *pcs,
- unsigned int mode,
+ unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
GEM_BIT(SIGNAL_OK));
@@ -620,7 +557,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
@@ -632,7 +569,7 @@ static void macb_pcs_an_restart(struct phylink_pcs *pcs)
}
static int macb_pcs_config(struct phylink_pcs *pcs,
- unsigned int mode,
+ unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
@@ -768,8 +705,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -789,34 +724,35 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
- /* Enable Rx and Tx */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
+ /* Enable Rx and Tx; Enable PTP unicast */
+ ctrl = macb_readl(bp, NCR);
+ if (gem_has_ptp(bp))
+ ctrl |= MACB_BIT(PTPUNI);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
netif_tx_wake_all_queues(ndev);
}
-static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
if (interface == PHY_INTERFACE_MODE_10GBASER)
- bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ return &bp->phylink_usx_pcs;
else if (interface == PHY_INTERFACE_MODE_SGMII)
- bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ return &bp->phylink_sgmii_pcs;
else
- bp->phylink_pcs.ops = NULL;
-
- if (bp->phylink_pcs.ops)
- phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
-
- return 0;
+ return NULL;
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
- .mac_prepare = macb_mac_prepare,
+ .mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -874,14 +810,47 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
+ bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
+
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
+ bp->phylink_config.mac_managed_pm = true;
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
bp->phylink_config.poll_fixed_state = true;
bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
}
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -893,23 +862,15 @@ static int macb_mii_probe(struct net_device *dev)
return 0;
}
-static int macb_mdiobus_register(struct macb *bp)
+static int macb_mdiobus_register(struct macb *bp, struct device_node *mdio_np)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
/* If we have a child named mdio, probe it instead of looking for PHYs
* directly under the MAC node
*/
- child = of_get_child_by_name(np, "mdio");
- if (np) {
- int ret = of_mdiobus_register(bp->mii_bus, child);
-
- of_node_put(child);
- return ret;
- }
-
- if (of_phy_is_fixed_link(np))
- return mdiobus_register(bp->mii_bus);
+ if (mdio_np)
+ return of_mdiobus_register(bp->mii_bus, mdio_np);
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
@@ -931,8 +892,17 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
+ struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
int err = -ENXIO;
+ /* With fixed-link there is no need to register the MDIO bus,
+ * unless the device tree has a child node named "mdio"; in that
+ * case other devices may be attached to the MACB's MDIO bus.
+ */
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (!mdio_np && of_phy_is_fixed_link(np))
+ return macb_mii_probe(bp->dev);
+
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -943,8 +913,10 @@ static int macb_mii_init(struct macb *bp)
}
bp->mii_bus->name = "MACB_mii_bus";
- bp->mii_bus->read = &macb_mdio_read;
- bp->mii_bus->write = &macb_mdio_write;
+ bp->mii_bus->read = &macb_mdio_read_c22;
+ bp->mii_bus->write = &macb_mdio_write_c22;
+ bp->mii_bus->read_c45 = &macb_mdio_read_c45;
+ bp->mii_bus->write_c45 = &macb_mdio_write_c45;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -952,7 +924,7 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- err = macb_mdiobus_register(bp);
+ err = macb_mdiobus_register(bp, mdio_np);
if (err)
goto err_out_free_mdiobus;
@@ -967,13 +939,15 @@ err_out_unregister_bus:
err_out_free_mdiobus:
mdiobus_free(bp->mii_bus);
err_out:
+ of_node_put(mdio_np);
+
return err;
}
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -984,25 +958,18 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO clears or the timeout expires. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
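read_poll_timeout_atomic() from <linux/iopoll.h> packages the deleted jiffies loop: it evaluates val = op(args...) every delay_us microseconds until cond holds or timeout_us expires, returning 0 on success and -ETIMEDOUT otherwise. The call above, annotated:

return read_poll_timeout_atomic(macb_readl,			/* op: val = macb_readl(bp, TSR) */
				status,				/* val */
				!(status & MACB_BIT(TGO)),	/* cond: halt completed */
				250,				/* delay between reads, us */
				MACB_HALT_TIMEOUT,		/* give up after this many us */
				false,				/* no delay before first read */
				bp, TSR);			/* op's arguments */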
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
if (tx_skb->mapping) {
if (tx_skb->mapped_as_page)
@@ -1015,17 +982,16 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
if (tx_skb->skb) {
- dev_kfree_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
}
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
/* The low bits of RX address contain the RX_USED bit, clearing
@@ -1034,22 +1000,23 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
*/
dma_wmb();
}
-#endif
+
desc->addr = lower_32_bits(addr);
}
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
dma_addr_t addr = 0;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- struct macb_dma_desc_64 *desc_64;
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ if (macb_dma64(bp)) {
+ struct macb_dma_desc_64 *desc_64;
+
desc_64 = macb_64b_desc(bp, desc);
addr = ((u64)(desc_64->addrh) << 32);
}
-#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+ if (macb_dma_ptp(bp))
+ addr &= ~GEM_BIT(DMA_RXVALID);
return addr;
}
@@ -1057,23 +1024,28 @@ static void macb_tx_error_task(struct work_struct *work)
{
struct macb_queue *queue = container_of(work, struct macb_queue,
tx_error_task);
+ bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
- /* Prevent the queue IRQ handlers from running: each of them may call
- * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ /* Prevent the queue NAPI TX poll from running, as it calls
+ * macb_tx_complete(), which in turn may call netif_wake_subqueue().
* As explained below, we have to halt the transmission before updating
* TBQP registers so we call netif_tx_stop_all_queues() to notify the
* network engine about the macb/gem being halted.
*/
+ napi_disable(&queue->napi_tx);
spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
@@ -1083,9 +1055,11 @@ static void macb_tx_error_task(struct work_struct *work)
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
- if (macb_halt_tx(bp))
- /* Just complain for now, reinitializing TX path can be good */
+ if (macb_halt_tx(bp)) {
netdev_err(bp->dev, "BUG: halt tx timed out\n");
+ macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
+ halt_timeout = true;
+ }
/* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
@@ -1101,7 +1075,7 @@ static void macb_tx_error_task(struct work_struct *work)
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
tail++;
tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
@@ -1116,8 +1090,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1131,9 +1107,12 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1144,10 +1123,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -1156,32 +1131,60 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, TSR, macb_readl(bp, TSR));
queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+ if (halt_timeout)
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+
/* Now we are ready to start transmission again */
netif_tx_start_all_queues(bp->dev);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
spin_unlock_irqrestore(&bp->lock, flags);
+ napi_enable(&queue->napi_tx);
}
-static void macb_tx_interrupt(struct macb_queue *queue)
+static bool ptp_one_step_sync(struct sk_buff *skb)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
- struct macb *bp = queue->bp;
- u16 queue_index = queue - bp->queues;
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ /* No need to parse packet if PTP TS is not involved */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ goto not_oss;
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ /* Identify and return whether PTP one step sync is being processed */
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ goto not_oss;
- netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ goto not_oss;
+ if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
+ goto not_oss;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ return true;
+
+not_oss:
+ return false;
+}
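/* Summary of the filter above (based on the IEEE 1588 fields exposed by
 * <linux/ptp_classify.h>): a frame counts as one-step SYNC only when it
 * is marked for hardware timestamping, parses as a PTP packet, has the
 * twoStepFlag clear in flag_field[0], and carries messageType SYNC.
 * For such frames the controller inserts the timestamp into the frame
 * itself, which is why the callers added below skip the TX timestamp
 * event (macb_tx_complete()) and keep hardware FCS insertion enabled
 * (macb_tx_map(), macb_pad_and_fcs()): the patched frame needs its FCS
 * recomputed by hardware.
 */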
+
+static int macb_tx_complete(struct macb_queue *queue, int budget)
+{
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
+ unsigned long flags;
+ unsigned int tail;
+ unsigned int head;
+ int packets = 0;
+ u32 bytes = 0;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
- for (tail = queue->tx_tail; tail != head; tail++) {
+ for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1207,14 +1210,10 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP) &&
- gem_ptp_do_txstamp(queue, skb, desc) == 0) {
- /* skb now belongs to timestamp buffer
- * and will be removed later
- */
- tx_skb->skb = NULL;
- }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !ptp_one_step_sync(skb))
+ gem_ptp_do_txstamp(bp, skb, desc);
+
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -1222,10 +1221,12 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -1236,11 +1237,17 @@ static void macb_tx_interrupt(struct macb_queue *queue)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+
+ return packets;
}
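/* The netdev_tx_*_queue() calls introduced in this patch follow the
 * standard BQL (byte queue limits) pairing; a minimal sketch with a
 * hypothetical 100-byte frame:
 */
static void macb_bql_pairing_sketch(struct net_device *dev, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	netdev_tx_sent_queue(txq, 100);		/* ndo_start_xmit() */
	netdev_tx_completed_queue(txq, 1, 100);	/* completion/error task */
	netdev_tx_reset_queue(txq);		/* macb_close() teardown */
}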
static void gem_rx_refill(struct macb_queue *queue)
@@ -1258,7 +1265,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1290,13 +1296,25 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
macb_set_addr(bp, desc, paddr);
- /* properly align Ethernet header */
- skb_reserve(skb, NET_IP_ALIGN);
+ /* Properly align Ethernet header.
+ *
+ * Hardware can add dummy bytes if asked using the RBOF
+ * field inside the NCFGR register. That feature isn't
+ * available if hardware is RSC capable.
+ *
+ * We cannot fall back to doing the 2-byte shift before
+ * DMA mapping because the address field does not allow
+ * setting the low bits: 3 bits when HW_DMA_CAP_PTP is
+ * set, 2 bits otherwise.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ skb_reserve(skb, NET_IP_ALIGN);
} else {
desc->ctrl = 0;
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1597,31 +1615,51 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
return received;
}
-static int macb_poll(struct napi_struct *napi, int budget)
+static bool macb_rx_pending(struct macb_queue *queue)
{
- struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
struct macb *bp = queue->bp;
- int work_done;
- u32 status;
+ unsigned int entry;
+ struct macb_dma_desc *desc;
+
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
- status = macb_readl(bp, RSR);
- macb_writel(bp, RSR, status);
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
- netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ return (desc->addr & MACB_BIT(RX_USED)) != 0;
+}
+
+static int macb_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
+ struct macb *bp = queue->bp;
+ int work_done;
work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
- status = macb_readl(bp, RSR);
- if (status) {
+ netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_rx_pending(queue)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- napi_reschedule(napi);
- } else {
- queue_writel(queue, IER, bp->rx_intr_mask);
+ netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
+ napi_schedule(napi);
}
}
@@ -1630,9 +1668,95 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+
+ if (queue->tx_head == queue->tx_tail)
+ goto out_tx_ptr_unlock;
+
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
+
+ if (tbqp == head_idx)
+ goto out_tx_ptr_unlock;
+
+ spin_lock(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock(&bp->lock);
+
+out_tx_ptr_unlock:
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+}
+
+static bool macb_tx_complete_pending(struct macb_queue *queue)
+{
+ bool retval = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ if (queue->tx_head != queue->tx_tail) {
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
+ retval = true;
+ }
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ return retval;
+}
+
+static int macb_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
+ struct macb *bp = queue->bp;
+ int work_done;
+
+ work_done = macb_tx_complete(queue, budget);
+
+ rmb(); // ensure txubr_pending is up to date
+ if (queue->txubr_pending) {
+ queue->txubr_pending = false;
+ netdev_vdbg(bp->dev, "poll: tx restart\n");
+ macb_tx_restart(queue);
+ }
+
+ netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, MACB_BIT(TCOMP));
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were sent while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_tx_complete_pending(queue)) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
+ napi_schedule(napi);
+ }
+ }
+
+ return work_done;
+}
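/* Barrier pairing for txubr_pending, as used above and in the hard IRQ
 * handler further down (a sketch of the protocol, not new code):
 *
 *	macb_interrupt() (producer)     macb_tx_poll() (consumer)
 *	---------------------------     -------------------------
 *	queue->txubr_pending = true;    macb_tx_complete(queue, budget);
 *	wmb();                          rmb();
 *	__napi_schedule(&napi_tx);      if (queue->txubr_pending) ...
 *
 * The wmb() publishes the flag before the NAPI scheduling write; the
 * rmb() keeps the flag read from being reordered before the completion
 * processing that precedes it.
 */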
+
+static void macb_hresp_error_task(struct work_struct *work)
{
- struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+ struct macb *bp = from_work(bp, work, hresp_err_bh_work);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1669,21 +1793,6 @@ static void macb_hresp_error_task(struct tasklet_struct *t)
netif_tx_start_all_queues(dev);
}
-static void macb_tx_restart(struct macb_queue *queue)
-{
- unsigned int head = queue->tx_head;
- unsigned int tail = queue->tx_tail;
- struct macb *bp = queue->bp;
-
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TXUBR));
-
- if (head == tail)
- return;
-
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-}
-
static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1780,9 +1889,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&queue->napi)) {
+ if (napi_schedule_prep(&queue->napi_rx)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&queue->napi);
+ __napi_schedule(&queue->napi_rx);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR))) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR));
+
+ if (status & MACB_BIT(TXUBR)) {
+ queue->txubr_pending = true;
+ wmb(); // ensure softirq can see update
+ }
+
+ if (napi_schedule_prep(&queue->napi_tx)) {
+ netdev_vdbg(bp->dev, "scheduling TX softirq\n");
+ __napi_schedule(&queue->napi_tx);
}
}
@@ -1796,12 +1923,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
break;
}
- if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(queue);
-
- if (status & MACB_BIT(TXUBR))
- macb_tx_restart(queue);
-
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1825,17 +1946,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
- tasklet_schedule(&bp->hresp_err_tasklet);
+ queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1872,14 +1995,14 @@ static unsigned int macb_tx_map(struct macb *bp,
struct sk_buff *skb,
unsigned int hdrlen)
{
- dma_addr_t mapping;
- unsigned int len, entry, i, tx_head = queue->tx_head;
- struct macb_tx_skb *tx_skb = NULL;
- struct macb_dma_desc *desc;
- unsigned int offset, size, count = 0;
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int eof = 1, mss_mfs = 0;
+ unsigned int len, i, tx_head = queue->tx_head;
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+ unsigned int eof = 1, mss_mfs = 0;
+ struct macb_tx_skb *tx_skb = NULL;
+ struct macb_dma_desc *desc;
+ unsigned int offset, size;
+ dma_addr_t mapping;
/* LSO */
if (skb_shinfo(skb)->gso_size != 0) {
@@ -1899,8 +2022,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -1916,10 +2038,9 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
- size = min(len, bp->max_tx_length);
+ size = umin(len, bp->max_tx_length);
}
/* Then, map paged data from fragments */
@@ -1929,9 +2050,8 @@ static unsigned int macb_tx_map(struct macb *bp,
len = skb_frag_size(frag);
offset = 0;
while (len) {
- size = min(len, bp->max_tx_length);
- entry = macb_tx_ring_wrap(bp, tx_head);
- tx_skb = &queue->tx_skb[entry];
+ size = umin(len, bp->max_tx_length);
+ tx_skb = macb_tx_skb(queue, tx_head);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -1946,7 +2066,6 @@ static unsigned int macb_tx_map(struct macb *bp,
len -= size;
offset += size;
- count++;
tx_head++;
}
}
@@ -1968,9 +2087,8 @@ static unsigned int macb_tx_map(struct macb *bp,
* to set the end of TX queue
*/
i = tx_head;
- entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
- desc = macb_tx_desc(queue, entry);
+ desc = macb_tx_desc(queue, i);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -1990,16 +2108,15 @@ static unsigned int macb_tx_map(struct macb *bp,
do {
i--;
- entry = macb_tx_ring_wrap(bp, i);
- tx_skb = &queue->tx_skb[entry];
- desc = macb_tx_desc(queue, entry);
+ tx_skb = macb_tx_skb(queue, i);
+ desc = macb_tx_desc(queue, i);
ctrl = (u32)tx_skb->size;
if (eof) {
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
}
- if (unlikely(entry == (bp->tx_ring_size - 1)))
+ if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1))
ctrl |= MACB_BIT(TX_WRAP);
/* First descriptor is header descriptor */
@@ -2007,7 +2124,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ !ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -2026,7 +2144,7 @@ static unsigned int macb_tx_map(struct macb *bp,
queue->tx_head = tx_head;
- return count;
+ return 0;
dma_error:
netdev_err(bp->dev, "TX DMA map failed\n");
@@ -2034,10 +2152,10 @@ dma_error:
for (i = queue->tx_head; i != tx_head; i++) {
tx_skb = macb_tx_skb(queue, i);
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
- return 0;
+ return -ENOMEM;
}
static netdev_features_t macb_features_check(struct sk_buff *skb,
@@ -2098,23 +2216,19 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
- int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;
if (padlen <= 0) {
/* FCS could be appended to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
- /* FCS could be appended by moving data to headroom. */
- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
- padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@@ -2123,10 +2237,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
- if (!cloned && headroom + tailroom >= padlen) {
- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
- skb_set_tail_pointer(*skb, (*skb)->len);
- } else {
+ if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
@@ -2156,9 +2267,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue = &bp->queues[queue_index];
- unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
+ unsigned long flags;
bool is_lso;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -2172,6 +2283,10 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+ if (macb_dma_ptp(bp) &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
is_lso = (skb_shinfo(skb)->gso_size != 0);
if (is_lso) {
@@ -2180,14 +2295,14 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
return NETDEV_TX_BUSY;
}
} else
- hdrlen = min(skb_headlen(skb), bp->max_tx_length);
+ hdrlen = umin(skb_headlen(skb), bp->max_tx_length);
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
@@ -2213,20 +2328,20 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_irqsave(&bp->lock, flags);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
- spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
- return NETDEV_TX_BUSY;
+ ret = NETDEV_TX_BUSY;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
- if (!macb_tx_map(bp, queue, skb, hdrlen)) {
+ if (macb_tx_map(bp, queue, skb, hdrlen)) {
dev_kfree_skb_any(skb);
goto unlock;
}
@@ -2234,14 +2349,18 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_irqrestore(&bp->lock, flags);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return ret;
}
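/* Ring accounting used by the xmit and completion paths (sketch):
 * tx_head/tx_tail are free-running indices and the <linux/circ_buf.h>
 * macros reduce them modulo the power-of-two ring size, with
 * CIRC_SPACE() + CIRC_CNT() == ring_size - 1 (one slot stays unused).
 */
static bool macb_tx_ring_has_room_sketch(struct macb *bp,
					 struct macb_queue *queue,
					 unsigned int desc_cnt)
{
	/* slots still free for the producer, under tx_ptr_lock */
	return CIRC_SPACE(queue->tx_head, queue->tx_tail,
			  bp->tx_ring_size) >= desc_cnt;
}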
@@ -2311,29 +2430,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
+
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2375,39 +2507,59 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * The upper 32 bits of the Tx/Rx DMA descriptor base must match
+ * across all queues. We cannot enforce this guarantee; the best we
+ * can do is a single allocation and hope it lands in alloc_pages(),
+ * which guarantees natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Required for the tie-off descriptor used in PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2415,6 +2567,19 @@ out_err:
return -ENOMEM;
}
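/* Why one allocation per direction is enough here (sketch): the high
 * address words (TBQPH/RBQPH) are programmed once and shared by all
 * queues, as this patch assumes by dropping the per-queue TBQPH/RBQPH
 * writes, so every ring must sit in the same 4 GiB window. The
 * upper_32_bits() comparisons above reduce to this span check:
 */
static bool macb_rings_in_one_window_sketch(dma_addr_t base, size_t total)
{
	/* if the first and last byte share the high 32 bits, so does
	 * the base of every per-queue slice carved from the buffer
	 */
	return upper_32_bits(base) == upper_32_bits(base + total - 1);
}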
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *desc = bp->rx_ring_tieoff;
+
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ return;
+ /* Set up a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ desc->ctrl = 0;
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2438,6 +2603,7 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -2455,6 +2621,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -2477,6 +2645,9 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
+ /* Disable RX partial store and forward and reset watermark value */
+ gem_writel(bp, PBUFRXCUT, 0);
+
/* Disable all interrupts */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, -1);
@@ -2501,8 +2672,12 @@ static u32 gem_mdc_clk_div(struct macb *bp)
config = GEM_BF(CLK, GEM_CLK_DIV48);
else if (pclk_hz <= 160000000)
config = GEM_BF(CLK, GEM_CLK_DIV64);
- else
+ else if (pclk_hz <= 240000000)
config = GEM_BF(CLK, GEM_CLK_DIV96);
+ else if (pclk_hz <= 320000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV128);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV224);
return config;
}
@@ -2587,14 +2762,10 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
dmacfg &= ~GEM_BIT(ADDR64);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ if (macb_dma64(bp))
dmacfg |= GEM_BIT(ADDR64);
-#endif
-#ifdef CONFIG_MACB_USE_HWSTAMP
- if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ if (macb_dma_ptp(bp))
dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
-#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
gem_writel(bp, DMACFG, dmacfg);
@@ -2609,7 +2780,11 @@ static void macb_init_hw(struct macb *bp)
macb_set_hwaddr(bp);
config = macb_mdc_clk_div(bp);
- config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
+ /* Make eth data aligned.
+ * If RSC capable, that offset is ignored by HW.
+ */
+ if (!(bp->caps & MACB_CAPS_RSC))
+ config |= MACB_BF(RBOF, NET_IP_ALIGN);
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
@@ -2630,6 +2805,10 @@ static void macb_init_hw(struct macb *bp)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
+
+ /* Enable RX partial store and forward and set watermark */
+ if (bp->rx_watermark)
+ gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
}
/* The hash address register is 64 bits long and takes up two
@@ -2761,9 +2940,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2775,15 +2954,25 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_enable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
macb_init_hw(bp);
- err = macb_phylink_connect(bp);
+ err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface);
if (err)
goto reset_hw;
+ err = phy_power_on(bp->phy);
+ if (err)
+ goto reset_hw;
+
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto phy_off;
+
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
@@ -2791,10 +2980,15 @@ static int macb_open(struct net_device *dev)
return 0;
+phy_off:
+ phy_power_off(bp->phy);
+
reset_hw:
macb_reset_hw(bp);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
macb_free_consistent(bp);
pm_exit:
pm_runtime_put_sync(&bp->pdev->dev);
@@ -2810,12 +3004,17 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
+ }
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -2836,18 +3035,30 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
+static int macb_set_mac_addr(struct net_device *dev, void *addr)
+{
+ int err;
+
+ err = eth_mac_addr(dev, addr);
+ if (err < 0)
+ return err;
+
+ macb_set_hwaddr(netdev_priv(dev));
+ return 0;
+}
+
static void gem_update_stats(struct macb *bp)
{
struct macb_queue *queue;
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -2860,7 +3071,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
+ *p += ((u64)val) << 32;
}
}
@@ -2870,15 +3081,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
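/* Reading one 64-bit GEM counter split across a lo/hi register pair,
 * as in the loop above (sketch; assumes, like the existing code, that
 * the high word is consistent with the low word just read):
 */
static u64 gem_read_stat64_sketch(struct macb *bp, unsigned int offset)
{
	u64 val = bp->macb_reg_readl(bp, offset);	/* e.g. GEM_OCTTX */

	val |= (u64)bp->macb_reg_readl(bp, offset + 4) << 32; /* ..TXH */
	return val;
}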
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
- if (!netif_running(bp->dev))
- return nstat;
-
- gem_update_stats(bp);
+ spin_lock_irq(&bp->stats_lock);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -2907,19 +3116,19 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
-
- return nstat;
+ spin_unlock_irq(&bp->stats_lock);
}
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -2959,16 +3168,20 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3002,8 +3215,171 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
- return nstat;
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
@@ -3048,13 +3424,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- phylink_ethtool_get_wol(bp->phylink, wol);
- wol->supported |= WAKE_MAGIC;
+ phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= (WAKE_MAGIC | WAKE_ARP);
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ /* Add macb wolopts to phy wolopts */
+ wol->wolopts |= bp->wolopts;
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -3064,22 +3438,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- /* Don't manage WoL on MAC if handled by the PHY
- * or if there's a failure in talking to the PHY
- */
- if (!ret || ret != -EOPNOTSUPP)
+ /* Don't manage WoL on the MAC if the PHY's set_wol() fails */
+ if (ret && ret != -EOPNOTSUPP)
return ret;
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
+ bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
+ bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
+ bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
-
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
return 0;
}
@@ -3101,7 +3468,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -3113,7 +3482,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
@@ -3174,19 +3545,17 @@ static s32 gem_get_ptp_max_adj(void)
}
static int gem_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(dev);
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
+ if (!macb_dma_ptp(bp)) {
ethtool_op_get_ts_info(dev, info);
return 0;
}
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -3198,7 +3567,8 @@ static int gem_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
+ if (bp->ptp_clock)
+ info->phc_index = ptp_clock_index(bp->ptp_clock);
return 0;
}
@@ -3215,7 +3585,7 @@ static struct macb_ptp_info gem_ptp_info = {
#endif
static int macb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(netdev);
@@ -3374,7 +3744,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3427,8 +3798,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
@@ -3537,6 +3908,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3555,6 +3930,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3570,18 +3949,38 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (bp->ptp_info) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- }
- }
-
return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
+static int macb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->get_hwtst(dev, cfg);
+}
+
+static int macb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->set_hwtst(dev, cfg, extack);
+}
+
static inline void macb_set_txcsum_feature(struct macb *bp,
netdev_features_t features)
{
@@ -3666,21 +4065,246 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxflow_feature(bp, features);
}
+static int macb_taprio_setup_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *conf)
+{
+ u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
+ u32 configured_queues = 0, speed = 0, start_time_nsec;
+ struct macb_queue_enst_config *enst_queue;
+ struct tc_taprio_sched_entry *entry;
+ struct macb *bp = netdev_priv(ndev);
+ struct ethtool_link_ksettings kset;
+ struct macb_queue *queue;
+ u32 queue_mask;
+ u8 queue_id;
+ size_t i;
+ int err;
+
+ if (conf->num_entries > bp->num_queues) {
+ netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
+ conf->num_entries, bp->num_queues);
+ return -EINVAL;
+ }
+
+ if (conf->base_time < 0) {
+ netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
+ conf->base_time);
+ return -ERANGE;
+ }
+
+ /* Get the current link speed */
+ err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
+ if (unlikely(err)) {
+ netdev_err(ndev, "Failed to get link settings: %d\n", err);
+ return err;
+ }
+
+ speed = kset.base.speed;
+ if (unlikely(speed <= 0)) {
+ netdev_err(ndev, "Invalid speed: %d\n", speed);
+ return -EINVAL;
+ }
+
+ enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
+ if (unlikely(!enst_queue))
+ return -ENOMEM;
+
+ /* Pre-validate all entries before making any hardware changes */
+ for (i = 0; i < conf->num_entries; i++) {
+ entry = &conf->entries[i];
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
+ netdev_err(ndev, "Entry %zu: unsupported command %d\n",
+ i, entry->command);
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ /* Validate gate_mask: must be nonzero, single queue, and within range */
+ if (!is_power_of_2(entry->gate_mask)) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
+ i, entry->gate_mask);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* gate_mask must not select queues outside the valid queue range */
+ queue_id = order_base_2(entry->gate_mask);
+ if (queue_id >= bp->num_queues) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
+ i, entry->gate_mask, bp->num_queues);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Check for start time limits */
+ start_time_sec = start_time;
+ start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
+ if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
+ netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
+ i, start_time_sec);
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for on time limit */
+ if (entry->interval > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
+ i, entry->interval, enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for off time limit*/
+ if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
+ i, conf->cycle_time - entry->interval,
+ enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ enst_queue[i].queue_id = queue_id;
+ enst_queue[i].start_time_mask =
+ (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
+ start_time_nsec;
+ enst_queue[i].on_time_bytes =
+ enst_ns_to_hw_units(entry->interval, speed);
+ enst_queue[i].off_time_bytes =
+ enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
+
+ configured_queues |= entry->gate_mask;
+ total_on_time += entry->interval;
+ start_time += entry->interval;
+ }
+
+ /* Check total interval doesn't exceed cycle time */
+ if (total_on_time > conf->cycle_time) {
+ netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
+ total_on_time, conf->cycle_time);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
+ conf->num_entries, conf->base_time, conf->cycle_time);
+
+ /* All validations passed - proceed with hardware configuration */
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Disable ENST queues if running before configuring */
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ for (i = 0; i < conf->num_entries; i++) {
+ queue = &bp->queues[enst_queue[i].queue_id];
+ /* Configure queue timing registers */
+ queue_writel(queue, ENST_START_TIME,
+ enst_queue[i].start_time_mask);
+ queue_writel(queue, ENST_ON_TIME,
+ enst_queue[i].on_time_bytes);
+ queue_writel(queue, ENST_OFF_TIME,
+ enst_queue[i].off_time_bytes);
+ }
+
+ /* Enable ENST for all configured queues in one write */
+ gem_writel(bp, ENST_CONTROL, configured_queues);
+ }
+
+ netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
+ conf->num_entries, hweight32(configured_queues));
+
+cleanup:
+ kfree(enst_queue);
+ return err;
+}
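/* The ENST on/off times programmed above are in hardware byte-time
 * units. enst_ns_to_hw_units() is not visible in this hunk; a plausible
 * sketch of the conversion (the exact rounding is an assumption): at
 * `speed` Mbit/s one byte takes 8000/speed ns on the wire, so:
 */
static u64 enst_ns_to_hw_units_sketch(u64 ns, u32 speed_mbps)
{
	/* bytes = ns * speed_mbps / 8000 */
	return div_u64(ns * speed_mbps, 8 * 1000);
}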
+
+static void macb_taprio_destroy(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ u32 queue_mask;
+ unsigned int q;
+
+ netdev_reset_tc(ndev);
+ queue_mask = BIT_U32(bp->num_queues) - 1;
+
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Single disable command for all queues */
+ gem_writel(bp, ENST_CONTROL,
+ queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ /* Clear all queue ENST registers in batch */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, ENST_START_TIME, 0);
+ queue_writel(queue, ENST_ON_TIME, 0);
+ queue_writel(queue, ENST_OFF_TIME, 0);
+ }
+ }
+ netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
+}
+
+static int macb_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct macb *bp = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
+ return -EOPNOTSUPP;
+
+ /* Check if Device is in runtime suspend */
+ /* Check if the device is in runtime suspend */
+ netdev_err(ndev, "Device is in runtime suspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = macb_taprio_setup_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ macb_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (!dev || !type_data)
+ return -EINVAL;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return macb_setup_taprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = macb_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
#endif
.ndo_set_features = macb_set_features,
.ndo_features_check = macb_features_check,
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
+ .ndo_setup_tc = macb_setup_tc,
};
/* Configure peripheral capabilities according to device tree
@@ -3689,8 +4313,12 @@ static const struct net_device_ops macb_netdev_ops = {
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
+ struct device_node *np = bp->pdev->dev.of_node;
+ bool refclk_ext;
u32 dcfg;
+ refclk_ext = of_property_read_bool(np, "cdns,refclk-ext");
+
if (dt_conf)
bp->caps = dt_conf->caps;
@@ -3708,42 +4336,46 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
-#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6)))
+ bp->caps |= MACB_CAPS_RSC;
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
dev_err(&bp->pdev->dev,
"GEM doesn't support hardware ptp.\n");
else {
- bp->hw_dma_cap |= HW_DMA_CAP_PTP;
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ bp->caps |= MACB_CAPS_DMA_PTP;
bp->ptp_info = &gem_ptp_info;
+#endif
}
}
-#endif
}
+ if (refclk_ext)
+ bp->caps |= MACB_CAPS_USRIO_HAS_CLKEN;
+
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
-static void macb_probe_queues(void __iomem *mem,
- bool native_io,
- unsigned int *queue_mask,
- unsigned int *num_queues)
+static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io)
{
- *queue_mask = 0x1;
- *num_queues = 1;
+ /* BIT(0) is never set but queue 0 always exists. */
+ unsigned int queue_mask = 0x1;
- /* is it macb or gem ?
- *
- * We need to read directly from the hardware here because
- * we are early in the probe process and don't have the
- * MACB_CAPS_MACB_IS_GEM flag positioned
- */
- if (!hw_is_gem(mem, native_io))
- return;
+ /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */
+ if (hw_is_gem(mem, native_io)) {
+ if (native_io)
+ queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;
+ else
+ queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;
+
+ if (fls(queue_mask) != ffz(queue_mask)) {
+ dev_err(dev, "queue mask %#x has a hole\n", queue_mask);
+ return -EINVAL;
+ }
+ }
- /* bit 0 is never set but queue 0 always exists */
- *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
- *num_queues = hweight32(*queue_mask);
+ return hweight32(queue_mask);
}
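/* Why fls(mask) != ffz(mask) detects a hole (worked examples): for a
 * contiguous mask 0b0...01...1 the 1-based index of the highest set bit
 * (fls) equals the 0-based index of the lowest clear bit (ffz); any gap
 * breaks the equality.
 *
 *	queue_mask   fls()  ffz()
 *	0b00000001     1      1    contiguous, 1 queue
 *	0b00001111     4      4    contiguous, 4 queues
 *	0b00001011     4      2    hole at bit 2, rejected above
 */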
static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
@@ -3861,13 +4493,12 @@ static int macb_init(struct platform_device *pdev)
* register mapping but we don't want to test the queue index then
* compute the corresponding register offset at run time.
*/
- for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
- if (!(bp->queue_mask & (1 << hw_q)))
- continue;
-
+ for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
queue = &bp->queues[q];
queue->bp = bp;
- netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+ spin_lock_init(&queue->tx_ptr_lock);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -3876,12 +4507,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -3890,14 +4515,12 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
+ queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
+ queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
+ queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
+
/* get irq: here we use the linux queue index, not the hardware
* queue index. the queue irq definitions in the device tree
* must remove the optional gaps that could exist in the
@@ -3921,14 +4544,12 @@ static int macb_init(struct platform_device *pdev)
/* setup appropriated routines according to adapter type */
if (macb_is_gem(bp)) {
- bp->max_tx_length = GEM_MAX_TX_LEN;
bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
bp->macbgem_ops.mog_init_rings = gem_init_rings;
bp->macbgem_ops.mog_rx = gem_rx;
dev->ethtool_ops = &gem_ethtool_ops;
} else {
- bp->max_tx_length = MACB_MAX_TX_LEN;
bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
bp->macbgem_ops.mog_init_rings = macb_init_rings;
@@ -3936,11 +4557,18 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ netdev_sw_irq_coalesce_default_on(dev);
+
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
/* Set features */
dev->hw_features = NETIF_F_SG;
- /* Check LSO capability */
- if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
+ /* Check LSO capability; runtime detection can be overridden by a cap
+ * flag if the hardware is known to be buggy
+ */
+ if (!(bp->caps & MACB_CAPS_NO_LSO) &&
+ GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
dev->hw_features |= MACB_NETIF_LSO;
/* Checksum offload is only available on gem with packet buffer */
@@ -3948,6 +4576,10 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
+ /* Enable HW_TC if hardware supports QBV */
+ if (bp->caps & MACB_CAPS_QBV)
+ dev->hw_features |= NETIF_F_HW_TC;
+
dev->features = dev->hw_features;
/* Check RX Flow Filters support.
@@ -3955,8 +4587,8 @@ static int macb_init(struct platform_device *pdev)
* each 4-tuple define requires 1 T2 screener reg + 3 compare regs
*/
reg = gem_readl(bp, DCFG8);
- bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
- GEM_BFEXT(T2SCR, reg));
+ bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3),
+ GEM_BFEXT(T2SCR, reg));
INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
@@ -4137,11 +4769,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4328,7 +4958,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
@@ -4336,6 +4966,8 @@ static const struct net_device_ops at91ether_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
#endif
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
};
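
A hedged sketch, with assumed names, of the shape the .ndo_get_stats64
conversion implies: the handler fills the caller-provided rtnl_link_stats64
under the driver's stats lock instead of returning a pointer the way the
old .ndo_get_stats hook did.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t stats_lock;	/* protects the counters below */
	u64 rx_packets;
	u64 tx_packets;
};

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);

	spin_lock(&priv->stats_lock);
	stats->rx_packets = priv->rx_packets;
	stats->tx_packets = priv->tx_packets;
	spin_unlock(&priv->stats_lock);
}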
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
@@ -4391,36 +5023,45 @@ static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
return mgmt->rate;
}
-static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- if (WARN_ON(rate < 2500000))
- return 2500000;
- else if (rate == 2500000)
- return 2500000;
- else if (WARN_ON(rate < 13750000))
- return 2500000;
- else if (WARN_ON(rate < 25000000))
- return 25000000;
- else if (rate == 25000000)
- return 25000000;
- else if (WARN_ON(rate < 75000000))
- return 25000000;
- else if (WARN_ON(rate < 125000000))
- return 125000000;
- else if (rate == 125000000)
- return 125000000;
-
- WARN_ON(rate > 125000000);
+static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (WARN_ON(req->rate < 2500000))
+ req->rate = 2500000;
+ else if (req->rate == 2500000)
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 13750000))
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 25000000))
+ req->rate = 25000000;
+ else if (req->rate == 25000000)
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 75000000))
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 125000000))
+ req->rate = 125000000;
+ else if (req->rate == 125000000)
+ req->rate = 125000000;
+ else if (WARN_ON(req->rate > 125000000))
+ req->rate = 125000000;
+ else
+ req->rate = 125000000;
- return 125000000;
+ return 0;
}
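
The WARN_ON ladder above is a mechanical conversion of the old round_rate
ladder; restated as a hedged sketch (the helper name is an assumption), it
is a threshold map onto the three supported TX clock rates, with cut-offs
midway between them:

/* Quantize a requested rate to 2.5/25/125 MHz (10/100/1000 Mbit) */
static unsigned long fu540_quantize_tx_rate(unsigned long rate)
{
	if (rate < 13750000)		/* below midpoint of 2.5 and 25 MHz */
		return 2500000;
	else if (rate < 75000000)	/* below midpoint of 25 and 125 MHz */
		return 25000000;
	else
		return 125000000;
}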
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
- if (rate != 125000000)
+ struct clk_rate_request req;
+ int ret;
+
+ clk_hw_init_rate_request(hw, &req, rate);
+ ret = fu540_macb_tx_determine_rate(hw, &req);
+ if (ret != 0)
+ return ret;
+
+ if (req.rate != 125000000)
iowrite32(1, mgmt->reg);
else
iowrite32(0, mgmt->reg);
@@ -4431,7 +5072,7 @@ static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops fu540_c000_ops = {
.recalc_rate = fu540_macb_tx_recalc_rate,
- .round_rate = fu540_macb_tx_round_rate,
+ .determine_rate = fu540_macb_tx_determine_rate,
.set_rate = fu540_macb_tx_set_rate,
};
@@ -4492,6 +5133,84 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+
+err_out_phy_exit:
+ if (ret)
+ phy_exit(bp->phy);
+
+ return ret;
+}
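
A hedged aside on the dev_err_probe() pattern used throughout
init_reset_optional(): it logs the message (staying quiet for
-EPROBE_DEFER) and passes the error code through, which enables the
one-line "get resource or bail" style. The helper below is an illustrative
assumption, not driver code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_pclk(struct device *dev, struct clk **pclk)
{
	*pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(*pclk))
		return dev_err_probe(dev, PTR_ERR(*pclk),
				     "failed to get pclk\n");
	return 0;
}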
+
+static int eyeq5_init(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(netdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ bp->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(bp->phy))
+ return dev_err_probe(dev, PTR_ERR(bp->phy),
+ "failed to get PHY\n");
+
+ ret = phy_init(bp->phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init PHY\n");
+
+ ret = macb_init(pdev);
+ if (ret)
+ phy_exit(bp->phy);
+ return ret;
+}
+
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
@@ -4518,8 +5237,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4534,10 +5253,11 @@ static const struct macb_config pc302gem_config = {
};
static const struct macb_config sama5d2_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4550,8 +5270,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4583,11 +5303,11 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4601,9 +5321,22 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .max_tx_length = 4040, /* Cadence Erratum 1686 */
+ .jumbo_max_len = 4040,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
- MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
+ MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4612,15 +5345,49 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &sama7g5_usrio,
};
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_QBV,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config eyeq5_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_NO_LSO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = eyeq5_init,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config raspberrypi_rp1_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,np4-macb", .data = &np4_config },
@@ -4634,11 +5401,17 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config },
+ { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4646,8 +5419,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4658,20 +5431,17 @@ static const struct macb_config default_gem_config = {
static int macb_probe(struct platform_device *pdev)
{
const struct macb_config *macb_config = &default_gem_config;
- int (*clk_init)(struct platform_device *, struct clk **,
- struct clk **, struct clk **, struct clk **,
- struct clk **) = macb_config->clk_init;
- int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
- unsigned int queue_mask, num_queues;
- bool native_io;
phy_interface_t interface;
struct net_device *dev;
struct resource *regs;
+ u32 wtrmrk_rst_val;
void __iomem *mem;
struct macb *bp;
+ int num_queues;
+ bool native_io;
int err, val;
mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
@@ -4682,14 +5452,11 @@ static int macb_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(macb_dt_ids, np);
- if (match && match->data) {
+ if (match && match->data)
macb_config = match->data;
- clk_init = macb_config->clk_init;
- init = macb_config->init;
- }
}
- err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
+ err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
if (err)
return err;
@@ -4700,7 +5467,12 @@ static int macb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
native_io = hw_is_native_io(mem);
- macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
+ num_queues = macb_probe_queues(&pdev->dev, mem, native_io);
+ if (num_queues < 0) {
+ err = num_queues;
+ goto err_disable_clocks;
+ }
+
dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
if (!dev) {
err = -ENOMEM;
@@ -4724,33 +5496,59 @@ static int macb_probe(struct platform_device *pdev)
bp->macb_reg_writel = hw_writel;
}
bp->num_queues = num_queues;
- bp->queue_mask = queue_mask;
- if (macb_config)
- bp->dma_burst_length = macb_config->dma_burst_length;
+ bp->dma_burst_length = macb_config->dma_burst_length;
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
- if (macb_config)
- bp->jumbo_max_len = macb_config->jumbo_max_len;
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
+
+ if (!hw_is_gem(bp->regs, bp->native_io))
+ bp->max_tx_length = MACB_MAX_TX_LEN;
+ else if (macb_config->max_tx_length)
+ bp->max_tx_length = macb_config->max_tx_length;
+ else
+ bp->max_tx_length = GEM_MAX_TX_LEN;
bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, 1);
bp->usrio = macb_config->usrio;
+ /* Use partial store and forward mode (e.g. on zynqmp) only when a
+ * valid "cdns,rx-watermark" is given in the devicetree; otherwise
+ * leave it disabled.
+ */
+ if (GEM_BFEXT(PBUF_CUTTHRU, gem_readl(bp, DCFG6))) {
+ err = of_property_read_u32(bp->pdev->dev.of_node,
+ "cdns,rx-watermark",
+ &bp->rx_watermark);
+
+ if (!err) {
+ /* Disable partial store and forward in case of error or
+ * invalid watermark value
+ */
+ wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
+ if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
+ dev_info(&bp->pdev->dev, "Invalid watermark value\n");
+ bp->rx_watermark = 0;
+ }
+ }
+ }
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
- bp->hw_dma_cap |= HW_DMA_CAP_64B;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
+ bp->caps |= MACB_CAPS_DMA_64B;
}
#endif
platform_set_drvdata(pdev, dev);
@@ -4761,12 +5559,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
- /* MTU range: 68 - 1500 or 10240 */
+ /* MTU range: 68 - 1518 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
- dev->max_mtu = ETH_DATA_LEN;
+ dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
@@ -4798,13 +5596,13 @@ static int macb_probe(struct platform_device *pdev)
bp->phy_interface = interface;
/* IP specific init */
- err = init(pdev);
+ err = macb_config->init(pdev);
if (err)
goto err_out_free_netdev;
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4814,13 +5612,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+ INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
return 0;
@@ -4829,6 +5626,9 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->phy);
+
err_out_free_netdev:
free_netdev(dev);
@@ -4841,7 +5641,7 @@ err_disable_clocks:
return err;
}
-static int macb_remove(struct platform_device *pdev)
+static void macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
@@ -4850,50 +5650,89 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ unregister_netdev(dev);
+ phy_exit(bp->phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
- tasklet_kill(&bp->hresp_err_tasklet);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
+ cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
-
- return 0;
}
static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct in_ifaddr *ifa = NULL;
struct macb_queue *queue;
+ struct in_device *idev;
unsigned long flags;
unsigned int q;
int err;
+ u32 tmp;
+
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_exit(bp->phy);
if (!netif_running(netdev))
return 0;
if (bp->wol & MACB_WOL_ENABLED) {
+ /* Check for IP address in WOL ARP mode */
+ idev = __in_dev_get_rcu(bp->dev);
+ if (idev)
+ ifa = rcu_dereference(idev->ifa_list);
+ if ((bp->wolopts & WAKE_ARP) && !ifa) {
+ netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
+ return -EOPNOTSUPP;
+ }
spin_lock_irqsave(&bp->lock, flags);
- /* Flush all status bits */
- macb_writel(bp, TSR, -1);
- macb_writel(bp, RSR, -1);
+
+ /* Disable the Tx and Rx engines before disabling the queues;
+ * this is mandatory per the IP spec sheet.
+ */
+ tmp = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
+ /* Disable RX queues */
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
+ queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
+ } else {
+ /* Tie off RX queues */
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+ }
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
+ /* Enable Receive engine */
+ macb_writel(bp, NCR, tmp | MACB_BIT(RE));
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
+ tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
+ if (bp->wolopts & WAKE_ARP) {
+ tmp |= MACB_BIT(ARP);
+ /* write IP address into register */
+ tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
+ }
+
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
@@ -4909,7 +5748,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
- gem_writel(bp, WOL, MACB_BIT(MAG));
+ gem_writel(bp, WOL, tmp);
} else {
err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
@@ -4921,7 +5760,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ macb_writel(bp, WOL, tmp);
}
spin_unlock_irqrestore(&bp->lock, flags);
@@ -4930,8 +5769,10 @@ static int __maybe_unused macb_suspend(struct device *dev)
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_disable(&queue->napi);
+ ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
@@ -4965,6 +5806,9 @@ static int __maybe_unused macb_resume(struct device *dev)
unsigned int q;
int err;
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->phy);
+
if (!netif_running(netdev))
return 0;
@@ -5009,8 +5853,10 @@ static int __maybe_unused macb_resume(struct device *dev)
}
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_enable(&queue->napi);
+ ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
@@ -5023,6 +5869,7 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+
phylink_start(bp->phylink);
rtnl_unlock();
@@ -5040,7 +5887,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
if (!(device_may_wakeup(dev)))
macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
- else
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
@@ -5056,12 +5903,28 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5075,6 +5938,7 @@ static struct platform_driver macb_driver = {
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index f66d22de5168..fc4f5aee6ab3 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -19,8 +19,7 @@
#define PCI_DRIVER_NAME "macb_pci"
#define PLAT_DRIVER_NAME "macb"
-#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0xe007
+#define PCI_DEVICE_ID_CDNS_MACB 0xe007
#define GEM_PCLK_RATE 50000000
#define GEM_HCLK_RATE 50000000
@@ -117,7 +116,7 @@ static void macb_remove(struct pci_dev *pdev)
}
static const struct pci_device_id dev_id_table[] = {
- { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), },
+ { PCI_VDEVICE(CDNS, PCI_DEVICE_ID_CDNS_MACB) },
{ 0, }
};
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 095c5a2144a7..c9e77819196e 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -28,14 +28,16 @@
static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
struct macb_dma_desc *desc)
{
- if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
- return (struct macb_dma_desc_ptp *)
- ((u8 *)desc + sizeof(struct macb_dma_desc));
- if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
+ if (!macb_dma_ptp(bp))
+ return NULL;
+
+ if (macb_dma64(bp))
return (struct macb_dma_desc_ptp *)
((u8 *)desc + sizeof(struct macb_dma_desc)
+ sizeof(struct macb_dma_desc_64));
- return NULL;
+ else
+ return (struct macb_dma_desc_ptp *)
+ ((u8 *)desc + sizeof(struct macb_dma_desc));
}
static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -258,6 +260,8 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
* so subtract max size
@@ -266,8 +270,6 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
ts->tv_sec -= GEM_DMA_SEC_TOP;
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
-
return 0;
}
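
A hedged arithmetic sketch of the reordered seconds extension above (the
6-bit field width is an assumption; the real GEM field differs): the BD
carries only the low seconds bits, the TSU supplies the upper bits, and a
rollover the TSU has not yet reflected is corrected by subtracting the
field's modulus.

#include <stdint.h>
#include <stdio.h>

#define SEC_MASK 0x3fULL	/* assumed BD seconds field: 6 bits */
#define SEC_TOP  (SEC_MASK + 1)

static uint64_t extend_secs(uint64_t bd_sec, uint64_t tsu_sec)
{
	uint64_t sec = (bd_sec & SEC_MASK) | (tsu_sec & ~SEC_MASK);

	/* BD top bit set but TSU top bit clear: BD latched pre-rollover */
	if ((sec & (SEC_TOP >> 1)) && !(tsu_sec & (SEC_TOP >> 1)))
		sec -= SEC_TOP;
	return sec;
}

int main(void)
{
	/* TSU at 130 s, BD latched 0x3f just before its low bits wrapped:
	 * the extended value is 127, not the naive 191.
	 */
	printf("%llu\n", (unsigned long long)extend_secs(0x3f, 130));
	return 0;
}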
@@ -292,79 +294,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
}
}
-static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
- struct macb_dma_desc_ptp *desc_ptp)
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
{
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
-}
-
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
- struct macb_dma_desc *desc)
-{
- unsigned long tail = READ_ONCE(queue->tx_ts_tail);
- unsigned long head = queue->tx_ts_head;
struct macb_dma_desc_ptp *desc_ptp;
- struct gem_tx_ts *tx_timestamp;
-
- if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
- return -EINVAL;
+ struct timespec64 ts;
- if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
- return -ENOMEM;
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not set in TX BD as expected\n");
+ return;
+ }
- desc_ptp = macb_ptp_desc(queue->bp, desc);
+ desc_ptp = macb_ptp_desc(bp, desc);
/* Unlikely but check */
- if (!desc_ptp)
- return -EINVAL;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_timestamp = &queue->tx_timestamps[head];
- tx_timestamp->skb = skb;
+ if (!desc_ptp) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not supported in BD\n");
+ return;
+ }
+
/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
dma_rmb();
- tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
- tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
- /* move head */
- smp_store_release(&queue->tx_ts_head,
- (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
-
- schedule_work(&queue->tx_ts_task);
- return 0;
-}
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
-static void gem_tx_timestamp_flush(struct work_struct *work)
-{
- struct macb_queue *queue =
- container_of(work, struct macb_queue, tx_ts_task);
- unsigned long head, tail;
- struct gem_tx_ts *tx_ts;
-
- /* take current head */
- head = smp_load_acquire(&queue->tx_ts_head);
- tail = queue->tx_ts_tail;
-
- while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
- tx_ts = &queue->tx_timestamps[tail];
- gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
- /* cleanup */
- dev_kfree_skb_any(tx_ts->skb);
- /* remove old tail */
- smp_store_release(&queue->tx_ts_tail,
- (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
- tail = queue->tx_ts_tail;
- }
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
void gem_ptp_init(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_queue *queue;
- unsigned int q;
bp->ptp_clock_info = gem_ptp_caps_template;
@@ -384,11 +346,6 @@ void gem_ptp_init(struct net_device *dev)
}
spin_lock_init(&bp->tsu_clk_lock);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue->tx_ts_head = 0;
- queue->tx_ts_tail = 0;
- INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
- }
gem_ptp_init_tsu(bp);
@@ -419,22 +376,19 @@ static int gem_ptp_set_ts_mode(struct macb *bp,
return 0;
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config)
{
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
- tstamp_config = &bp->tstamp_config;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ *tstamp_config = bp->tstamp_config;
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
- if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
-static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
u32 reg_val;
@@ -444,38 +398,29 @@ static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
else
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
-
- return 0;
}
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack)
{
enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
u32 regval;
- tstamp_config = &bp->tstamp_config;
- if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
+ if (!macb_dma_ptp(bp))
return -EOPNOTSUPP;
- if (copy_from_user(tstamp_config, ifr->ifr_data,
- sizeof(*tstamp_config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (tstamp_config->flags)
- return -EINVAL;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (gem_ptp_set_one_step_sync(bp, 1) != 0)
- return -ERANGE;
- fallthrough;
+ gem_ptp_set_one_step_sync(bp, 1);
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
case HWTSTAMP_TX_ON:
+ gem_ptp_set_one_step_sync(bp, 0);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
default:
@@ -513,12 +458,11 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
+ bp->tstamp_config = *tstamp_config;
+
if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
return -ERANGE;
- if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
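
A hedged userspace sketch, not from the patch: the new kernel_hwtstamp_config
path in gem_set_hwtst() is still reached through the classic SIOCSHWTSTAMP
ioctl; the core now does the user copies and hands the driver a kernel-side
config instead of a struct ifreq.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_tx_tstamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}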
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..331ac6a3dc38 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
@@ -1358,7 +1358,7 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
/* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return xgmac_open(dev);
}
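
A hedged note on the WRITE_ONCE() conversion above: dev->mtu can be read
locklessly on the fast path, so the write is marked to prevent store
tearing; readers are expected to pair it with READ_ONCE(), roughly as in
this assumed helper:

#include <linux/netdevice.h>

static inline unsigned int example_read_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu);
}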
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
@@ -1820,7 +1820,7 @@ err_alloc:
* changes the link status, releases the DMA descriptor rings,
* unregisters the MDIO bus and unmaps the allocated memory.
*/
-static int xgmac_remove(struct platform_device *pdev)
+static void xgmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct xgmac_priv *priv = netdev_priv(ndev);
@@ -1840,8 +1840,6 @@ static int xgmac_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 1c76c95b0b27..7dae5aad3689 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -62,13 +62,17 @@ config CAVIUM_PTP
Precision Time Protocol or other purposes. Timestamps can be used in
BGX, TNS, GTI, and NIC blocks.
+config LIQUIDIO_CORE
+ tristate
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT && PCI
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select FW_LOADER
- select LIBCRC32C
+ select LIQUIDIO_CORE
select NET_DEVLINK
help
This driver supports Cavium LiquidIO Intelligent Server Adapters
@@ -92,6 +96,7 @@ config LIQUIDIO_VF
tristate "Cavium LiquidIO VF support"
depends on 64BIT && PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
+ select LIQUIDIO_CORE
help
This driver supports Cavium LiquidIO Intelligent Server Adapter
based on CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 9fd717b9cf69..61e261657073 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -209,7 +209,7 @@ static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
return -EOPNOTSUPP;
}
-static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+static u64 cavium_ptp_cc_read(struct cyclecounter *cc)
{
struct cavium_ptp *clock =
container_of(cc, struct cavium_ptp, cycle_counter);
@@ -239,12 +239,11 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
if (err)
goto error_free;
- err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev));
+ err = PTR_ERR_OR_ZERO(clock->reg_base);
if (err)
goto error_free;
- clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
-
spin_lock_init(&clock->spin_lock);
cc = &clock->cycle_counter;
@@ -292,7 +291,7 @@ error_stop:
clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
- pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+ pcim_iounmap_region(pdev, PCI_PTP_BAR_NO);
error_free:
devm_kfree(dev, clock);
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index bc9937502043..4ee80af88e79 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -3,7 +3,9 @@
# Cavium Liquidio ethernet device driver
#
-common-objs := lio_ethtool.o \
+obj-$(CONFIG_LIQUIDIO_CORE) += liquidio-core.o
+liquidio-core-y := \
+ lio_ethtool.o \
lio_core.o \
request_manager.o \
response_manager.o \
@@ -18,7 +20,7 @@ common-objs := lio_ethtool.o \
octeon_nic.o
obj-$(CONFIG_LIQUIDIO) += liquidio.o
-liquidio-y := lio_main.o octeon_console.o lio_vf_rep.o $(common-objs)
+liquidio-y := lio_main.o octeon_console.o lio_vf_rep.o
obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o
-liquidio_vf-y := lio_vf_main.o $(common-objs)
+liquidio_vf-y := lio_vf_main.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ed3d1ab2ca5..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -36,175 +36,6 @@
*/
#define CN23XX_INPUT_JABBER 64600
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
-{
- int i = 0;
- u32 regval = 0;
- struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
-
- /*In cn23xx_soft_reset*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
- "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
- lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
-
- /*In cn23xx_set_dpi_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
- lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
-
- for (i = 0; i < 6; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_ENB", i,
- CN23XX_DPI_DMA_ENG_ENB(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_DMA_ENG_BUF", i,
- CN23XX_DPI_DMA_ENG_BUF(i),
- lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
- CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
-
- /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
- pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_CONFIG_PCIE_DEVCTL",
- CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
-
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
- CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
- lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
-
- /*In cn23xx_specific_regs_setup */
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
- CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
- (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
-
- /*In cn23xx_setup_global_mac_regs*/
- for (i = 0; i < CN23XX_MAX_MACS; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_MAC_RINFO64", i,
- CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_PKT_MAC_RINFO64
- (i, oct->pf_num))));
- }
-
- /*In cn23xx_setup_global_input_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_PKT_CONTROL64", i,
- CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
- CVM_CAST64(octeon_read_csr64
- (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
- }
-
- /*In cn23xx_setup_global_output_regs*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
-
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_CONTROL", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
- }
-
- /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_enb_reg64",
- CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
- CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "cn23xx->intr_sum_reg64",
- CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
- CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
-
- /*In cn23xx_setup_iq_regs*/
- for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_IQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_DOORBELL", i,
- CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_DOORBELL(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_IQ_INSTR_COUNT64", i,
- CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
- }
-
- /*In cn23xx_setup_oq_regs*/
- for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BASE_ADDR64", i,
- CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
- CVM_CAST64(octeon_read_csr
- (oct, CN23XX_SLI_OQ_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
- CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
- CVM_CAST64(octeon_read_csr(
- oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_SENT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
- dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
- "CN23XX_SLI_OQ_PKTS_CREDIT", i,
- CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
- CVM_CAST64(octeon_read_csr64(
- oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
- }
-
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_TIME_INT",
- CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
- dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
- "CN23XX_SLI_PKT_CNT_INT",
- CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
- CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
-}
-
static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
@@ -218,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -230,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -290,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
/* time_intr is in microseconds. The next 2 steps gives the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -305,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -333,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -352,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit, to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -363,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -414,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -437,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -458,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -523,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -542,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
* enabling this will cause of head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -552,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /** Enable channel-level backpressure **/
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -565,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -579,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -719,12 +550,10 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
for (i = 0; i < oct->sriov_info.max_vfs; i++) {
q_no = i * oct->sriov_info.rings_per_vf;
- mbox = vmalloc(sizeof(*mbox));
+ mbox = vzalloc(sizeof(*mbox));
if (!mbox)
goto free_mbox;
- memset(mbox, 0, sizeof(struct octeon_mbox));
-
spin_lock_init(&mbox->lock);
mbox->oct_dev = oct;
@@ -745,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -797,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -806,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -852,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -878,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -911,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Note that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -931,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -964,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -1079,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -1090,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -1105,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write to multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -1161,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1174,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1377,54 +1206,16 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
return 0;
}
-
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
@@ -1435,6 +1226,7 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
}
+EXPORT_SYMBOL_GPL(cn23xx_fw_loaded);
void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
u8 *mac)
@@ -1456,6 +1248,7 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
octeon_mbox_write(oct, &mbox_cmd);
}
}
+EXPORT_SYMBOL_GPL(cn23xx_tell_vf_its_macaddr_changed);
static void
cn23xx_get_vf_stats_callback(struct octeon_device *oct,
@@ -1489,7 +1282,7 @@ int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ mbox_cmd.fn = cn23xx_get_vf_stats_callback;
ctx.stats = stats;
atomic_set(&ctx.status, 0);
mbox_cmd.fn_arg = (void *)&ctx;
@@ -1510,3 +1303,4 @@ int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
return 0;
}
+EXPORT_SYMBOL_GPL(cn23xx_get_vf_stats);
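
The EXPORT_SYMBOL_GPL() additions throughout this file make the chip-setup entry points callable from other modules; together with the MODULE_AUTHOR/MODULE_DESCRIPTION/MODULE_LICENSE macros added to lio_core.c further down, they read as groundwork for building the shared LiquidIO core as a module of its own. A minimal sketch of the pattern, with a hypothetical function name:

    #include <linux/module.h>

    /* Hypothetical helper: once exported, any GPL module that links
     * against this one and declares the prototype can call it.
     */
    int example_setup_device(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_GPL(example_setup_device);
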
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index e6f31d0d5c0b..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,13 +54,8 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
-
int cn23xx_sriov_config(struct octeon_device *oct);
int cn23xx_fw_loaded(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index fda49404968c..d2fcb3da484e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -279,12 +279,10 @@ static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
{
struct octeon_mbox *mbox = NULL;
- mbox = vmalloc(sizeof(*mbox));
+ mbox = vzalloc(sizeof(*mbox));
if (!mbox)
return 1;
- memset(mbox, 0, sizeof(struct octeon_mbox));
-
spin_lock_init(&mbox->lock);
mbox->oct_dev = oct;
@@ -386,6 +384,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
octeon_mbox_write(oct, &mbox_cmd);
}
+EXPORT_SYMBOL_GPL(cn23xx_vf_ask_pf_to_do_flr);
static void octeon_pfvf_hs_callback(struct octeon_device *oct,
struct octeon_mbox_cmd *cmd,
@@ -430,7 +429,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
mbox_cmd.q_no = 0;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+ mbox_cmd.fn = octeon_pfvf_hs_callback;
mbox_cmd.fn_arg = &status;
octeon_mbox_write(oct, &mbox_cmd);
@@ -468,6 +467,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(cn23xx_octeon_pfvf_handshake);
static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
{
@@ -680,3 +680,4 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(cn23xx_setup_octeon_vf_device);
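
The vmalloc()+memset() pair replaced by vzalloc() in cn23xx_setup_vf_mbox() is the usual cleanup: vzalloc() hands back memory that is already zeroed, so the explicit memset() is redundant. A minimal equivalent sketch (the struct name is a stand-in):

    #include <linux/vmalloc.h>

    struct example_mbox { int state; };

    static struct example_mbox *example_alloc_mbox(void)
    {
            /* vzalloc(n) behaves like vmalloc(n) + memset(p, 0, n) */
            return vzalloc(sizeof(struct example_mbox));
    }
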
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 2d06097d3f61..40f529d0bc4c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-
-void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 39643be8c30a..93fccfec288d 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -697,6 +697,7 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(lio_setup_cn66xx_octeon_device);
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
struct octeon_config *conf6xxx)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 8ed57134ee0c..129c8b84f549 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -86,7 +86,6 @@ u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
-void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 30254e4cf70f..b5103def3761 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -181,3 +181,4 @@ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(lio_setup_cn68xx_octeon_device);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..215dac201b4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -26,6 +26,10 @@
#include "octeon_main.h"
#include "octeon_network.h"
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
+MODULE_LICENSE("GPL");
+
/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250
@@ -71,6 +75,7 @@ void lio_delete_glists(struct lio *lio)
kfree(lio->glist);
lio->glist = NULL;
}
+EXPORT_SYMBOL_GPL(lio_delete_glists);
/**
* lio_setup_glists - Setup gather lists
@@ -154,6 +159,7 @@ int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
return 0;
}
+EXPORT_SYMBOL_GPL(lio_setup_glists);
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
@@ -180,6 +186,7 @@ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
}
return ret;
}
+EXPORT_SYMBOL_GPL(liquidio_set_feature);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
unsigned int bytes_compl)
@@ -395,6 +402,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
nctrl->ncmd.s.cmd);
}
}
+EXPORT_SYMBOL_GPL(liquidio_link_ctrl_cmd_completion);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
@@ -464,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
q_no = lio->linfo.rxpciq[q].s.q_no;
wq = &lio->rxq_status_wq[q_no];
wq->wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!wq->wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
return -ENOMEM;
@@ -478,6 +486,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
return 0;
}
+EXPORT_SYMBOL_GPL(setup_rx_oom_poll_fn);
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
@@ -495,6 +504,7 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
}
}
}
+EXPORT_SYMBOL_GPL(cleanup_rx_oom_poll_fn);
/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
@@ -851,7 +861,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
@@ -899,6 +909,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
return 0;
}
+EXPORT_SYMBOL_GPL(liquidio_setup_io_queues);
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
@@ -1194,6 +1205,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
}
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_setup_interrupt);
/**
* liquidio_change_mtu - Net device change_mtu
@@ -1250,12 +1262,13 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
lio->mtu = new_mtu;
WRITE_ONCE(sc->caller_is_done, true);
return 0;
}
+EXPORT_SYMBOL_GPL(liquidio_change_mtu);
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
@@ -1279,6 +1292,7 @@ int lio_wait_for_clean_oq(struct octeon_device *oct)
return pending_pkts;
}
+EXPORT_SYMBOL_GPL(lio_wait_for_clean_oq);
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
@@ -1509,6 +1523,7 @@ lio_fetch_stats_exit:
return;
}
+EXPORT_SYMBOL_GPL(lio_fetch_stats);
int liquidio_set_speed(struct lio *lio, int speed)
{
@@ -1659,6 +1674,7 @@ int liquidio_get_speed(struct lio *lio)
return retval;
}
+EXPORT_SYMBOL_GPL(liquidio_get_speed);
int liquidio_set_fec(struct lio *lio, int on_off)
{
@@ -1812,3 +1828,4 @@ int liquidio_get_fec(struct lio *lio)
return retval;
}
+EXPORT_SYMBOL_GPL(liquidio_get_fec);
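
The netif_napi_add() call in liquidio_setup_io_queues() drops its trailing weight argument: the core now supplies the default NAPI_POLL_WEIGHT (64), and drivers that genuinely need a different budget call netif_napi_add_weight() instead. Before/after sketch with a hypothetical poll function:

    /* old form: explicit weight */
    netif_napi_add(netdev, napi, example_poll, 64);

    /* new form: weight defaults to NAPI_POLL_WEIGHT (64) */
    netif_napi_add(netdev, napi, example_poll);
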
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 2b9747867d4c..c849e2c871a9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -442,10 +442,11 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
oct = lio->oct_dev;
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strcpy(drvinfo->driver, "liquidio");
- strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
- ETHTOOL_FWVERS_LEN);
- strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ strscpy(drvinfo->driver, "liquidio", sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(oct->pci_dev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -458,10 +459,11 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
oct = lio->oct_dev;
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strcpy(drvinfo->driver, "liquidio_vf");
- strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
- ETHTOOL_FWVERS_LEN);
- strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ strscpy(drvinfo->driver, "liquidio_vf", sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(oct->pci_dev),
+ sizeof(drvinfo->bus_info));
}
static int
@@ -947,7 +949,9 @@ static int lio_set_phys_id(struct net_device *netdev,
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -1252,8 +1256,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return 0;
}
-static int lio_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
u32 rx_count, tx_count, rx_count_old, tx_count_old;
struct lio *lio = GET_LIO(netdev);
@@ -2489,37 +2496,31 @@ ret_intrmod:
return ret;
}
+#ifdef PTP_HARDWARE_TIMESTAMPING
static int lio_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
-#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
-#endif
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ SOF_TIMESTAMPING_TX_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
- else
- info->phc_index = -1;
-#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-#endif
return 0;
}
+#endif
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
@@ -3139,7 +3140,9 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
@@ -3162,7 +3165,9 @@ static const struct ethtool_ops lio_vf_ethtool_ops = {
.set_coalesce = lio_set_intr_coalesce,
.get_priv_flags = lio_get_priv_flags,
.set_priv_flags = lio_set_priv_flags,
+#ifdef PTP_HARDWARE_TIMESTAMPING
.get_ts_info = lio_get_ts_info,
+#endif
};
void liquidio_set_ethtool_ops(struct net_device *netdev)
@@ -3175,3 +3180,4 @@ void liquidio_set_ethtool_ops(struct net_device *netdev)
else
netdev->ethtool_ops = &lio_ethtool_ops;
}
+EXPORT_SYMBOL_GPL(liquidio_set_ethtool_ops);
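
The strncpy()→strscpy() conversions in lio_get_drvinfo()/lio_get_vf_drvinfo() follow the standard rationale: strscpy() always NUL-terminates the destination and reports truncation, whereas strncpy() can leave the buffer unterminated (and the hard-coded 32 silently duplicated the bus_info size). A small sketch of the truncation behaviour:

    #include <linux/string.h>

    char buf[8];
    ssize_t n;

    /* the source needs 12 bytes, the destination has 8 */
    n = strscpy(buf, "liquidio_vf", sizeof(buf));
    /* n == -E2BIG, but buf still holds the valid NUL-terminated
     * string "liquidi"; strncpy() would have left no terminator
     */
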
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 12eee2bc7f5c..0732440eeacd 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,17 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
-struct oct_link_status_resp {
- u64 rh;
- struct oct_link_info link_info;
- u64 status;
-};
-
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
@@ -196,8 +185,7 @@ static void octeon_droq_bh(struct tasklet_struct *t)
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
int retry = 100, pkt_cnt = 0, pending_pkts = 0;
int i;
@@ -538,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -671,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->sync_octeon_time_wq.wq =
- alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("update-octeon-time",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->sync_octeon_time_wq.wq) {
dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
return -1;
@@ -955,8 +945,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
{
int i, refcount;
struct msix_entry *msix_entries;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct handshake *hs;
@@ -1134,7 +1123,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
@@ -1217,8 +1205,7 @@ static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
struct lio *lio;
@@ -1517,14 +1504,17 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * liquidio_ptp_adjfine - Adjust ptp frequency
* @ptp: PTP clock info
- * @ppb: how much to adjust by, in parts-per-billion
+ * @scaled_ppm: how much to adjust by, in scaled parts-per-million
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u64 comp, delta;
unsigned long flags;
bool neg_adj = false;
@@ -1648,7 +1638,7 @@ static void oct_ptp_open(struct net_device *netdev)
lio->ptp_info.n_ext_ts = 0;
lio->ptp_info.n_per_out = 0;
lio->ptp_info.pps = 0;
- lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjfine = liquidio_ptp_adjfine;
lio->ptp_info.adjtime = liquidio_ptp_adjtime;
lio->ptp_info.gettime64 = liquidio_ptp_gettime;
lio->ptp_info.settime64 = liquidio_ptp_settime;
@@ -1695,7 +1685,7 @@ static int load_firmware(struct octeon_device *oct)
if (fw_type_is_auto()) {
tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
- strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
+ strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type));
} else {
tmp_fw_type = fw_type;
}
@@ -1746,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->txq_status_wq.wq = alloc_workqueue("txq-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return -1;
@@ -1777,8 +1767,7 @@ static int liquidio_open(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
int ret = 0;
@@ -1799,13 +1788,10 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (OCTEON_CN23XX_PF(oct)) {
- if (!oct->msix_on)
- if (setup_tx_poll_fn(netdev))
- return -1;
- } else {
- if (setup_tx_poll_fn(netdev))
- return -1;
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
}
netif_tx_start_all_queues(netdev);
@@ -1818,7 +1804,7 @@ static int liquidio_open(struct net_device *netdev)
/* tell Octeon to start forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 1);
if (ret)
- return ret;
+ goto err_rx_ctrl;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1829,6 +1815,27 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
return ret;
}
@@ -1840,8 +1847,7 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
int ret = 0;
@@ -2101,23 +2107,16 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_fifo_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
-
- if (conf.flags)
- return -EINVAL;
+ if (!lio->oct_dev->ptp_enable)
+ return -EOPNOTSUPP;
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -2125,7 +2124,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -2143,39 +2142,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
struct lio *lio = GET_LIO(netdev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (lio->oct_dev->ptp_enable)
- return hwtstamp_ioctl(netdev, ifr);
- fallthrough;
- default:
- return -EOPNOTSUPP;
- }
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
/**
@@ -3224,7 +3216,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_set_vf_mac = liquidio_set_vf_mac,
@@ -3235,6 +3226,8 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
.ndo_get_port_parent_id = liquidio_get_port_parent_id,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
/**
@@ -3566,7 +3559,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_LRO;
}
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
@@ -4045,8 +4038,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
char bootcmd[] = "\n";
char *dbg_enb = NULL;
enum lio_fw_state fw_state;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)octeon_dev->priv;
+ struct octeon_device_priv *oct_priv = octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
/* Enable access to the octeon device and make its DMA capability
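
The adjfreq→adjfine conversion changes the unit of the PTP adjustment: scaled_ppm is parts-per-million carrying a 16-bit binary fraction, so 65536 scaled_ppm equals 1 ppm (1000 ppb), and scaled_ppm_to_ppb() converts back to the ppb the existing compensation math expects. A minimal sketch of a .adjfine handler under that definition:

    #include <linux/ptp_clock_kernel.h>

    static int example_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
    {
            /* scaled_ppm_to_ppb(65536) == 1000 */
            s32 ppb = scaled_ppm_to_ppb(scaled_ppm);

            /* ... fold ppb into the hardware compensation register ... */
            return 0;
    }
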
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c607756b731f..e02942dbbcce 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -72,8 +72,7 @@ static int liquidio_stop(struct net_device *netdev);
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
int retry = MAX_IO_PENDING_PKT_COUNT;
int pkt_cnt = 0, pending_pkts;
int i;
@@ -305,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -442,8 +442,7 @@ static void octeon_pci_flr(struct octeon_device *oct)
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct msix_entry *msix_entries;
int i;
@@ -577,7 +576,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
@@ -660,8 +658,7 @@ static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
struct lio *lio;
@@ -910,8 +907,7 @@ static int liquidio_open(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
int ret = 0;
@@ -957,8 +953,7 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = oct->priv;
struct napi_struct *napi, *n;
int ret = 0;
@@ -1241,23 +1236,13 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->tx_carrier_errors;
}
-/**
- * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
- * @netdev: network device
- * @ifr: interface request
- */
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int liquidio_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
- struct hwtstamp_config conf;
-
- if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
- return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
- switch (conf.tx_type) {
+ switch (conf->tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
break;
@@ -1265,7 +1250,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (conf.rx_filter) {
+ switch (conf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -1283,35 +1268,31 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ conf->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
- if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ if (conf->rx_filter == HWTSTAMP_FILTER_ALL)
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
else
ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
- return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+ return 0;
}
-/**
- * liquidio_ioctl - ioctl handler
- * @netdev: network device
- * @ifr: interface request
- * @cmd: command
- */
-static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int liquidio_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *conf)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ struct lio *lio = GET_LIO(netdev);
+
+ /* TX timestamping is technically always on */
+ conf->tx_type = HWTSTAMP_TX_ON;
+ conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ return 0;
}
static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
@@ -1889,9 +1870,10 @@ static const struct net_device_ops lionetdevops = {
.ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
- .ndo_eth_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_hwtstamp_get = liquidio_hwtstamp_get,
+ .ndo_hwtstamp_set = liquidio_hwtstamp_set,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
@@ -2097,7 +2079,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_GRO
| NETIF_F_LRO;
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
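
Both the PF and VF drivers retire their private SIOCSHWTSTAMP ioctl plumbing in favour of the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks; as I understand the conversion, the netdev core now performs the copy_from_user()/copy_to_user() and the reserved-flags validation that each driver used to duplicate. A hedged sketch of the callback shape (all names hypothetical):

    #include <linux/netdevice.h>
    #include <linux/net_tstamp.h>

    static int ex_hwtstamp_set(struct net_device *ndev,
                               struct kernel_hwtstamp_config *cfg,
                               struct netlink_ext_ack *extack)
    {
            if (cfg->tx_type != HWTSTAMP_TX_OFF &&
                cfg->tx_type != HWTSTAMP_TX_ON)
                    return -ERANGE;
            /* ... program the hardware, normalise cfg->rx_filter ... */
            return 0;
    }

    static int ex_hwtstamp_get(struct net_device *ndev,
                               struct kernel_hwtstamp_config *cfg)
    {
            /* report whatever the hardware is currently doing */
            cfg->tx_type = HWTSTAMP_TX_ON;
            cfg->rx_filter = HWTSTAMP_FILTER_NONE;
            return 0;
    }
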
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 600de587d7a9..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -218,7 +218,7 @@ lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
return -EIO;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
@@ -638,7 +637,8 @@ lio_vf_rep_netdev_event(struct notifier_block *nb,
memset(&rep_cfg, 0, sizeof(rep_cfg));
rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
rep_cfg.ifidx = vf_rep->ifidx;
- strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);
+ strscpy(rep_cfg.rep_name.name, ndev->name,
+ sizeof(rep_cfg.rep_name.name));
ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
sizeof(rep_cfg), NULL, 0);
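
The plain stores to ndev->mtu become WRITE_ONCE() here, as in the PF and VF drivers above, because the MTU is read locklessly on fast paths; the annotation pairs with READ_ONCE() on the reader side and stops the compiler from tearing or re-reading the access. Sketch:

    /* writer, e.g. inside an ndo_change_mtu handler */
    WRITE_ONCE(netdev->mtu, new_mtu);

    /* lockless reader elsewhere */
    unsigned int mtu = READ_ONCE(netdev->mtu);
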
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 28feabec8fbb..67c3570f875f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -247,8 +247,7 @@ static const struct cvmx_bootmem_named_block_desc
struct cvmx_bootmem_named_block_desc,
size));
- strncpy(desc->name, name, sizeof(desc->name));
- desc->name[sizeof(desc->name) - 1] = 0;
+ strscpy(desc->name, name, sizeof(desc->name));
return &oct->bootmem_named_block_desc;
} else {
return NULL;
@@ -471,8 +470,8 @@ static void output_console_line(struct octeon_device *oct,
if (line != &console_buffer[bytes_read]) {
console_buffer[bytes_read] = '\0';
len = strlen(console->leftover);
- strncpy(&console->leftover[len], line,
- sizeof(console->leftover) - len);
+ strscpy(&console->leftover[len], line,
+ sizeof(console->leftover) - len + 1);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index e159194d0aef..1753bb87dfbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -564,6 +564,7 @@ void octeon_init_device_list(int conf_type)
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
}
+EXPORT_SYMBOL_GPL(octeon_init_device_list);
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
u16 card_type)
@@ -633,6 +634,7 @@ char *lio_get_state_string(atomic_t *state_ptr)
return oct_dev_state_str[OCT_DEV_STATE_INVALID];
return oct_dev_state_str[istate];
}
+EXPORT_SYMBOL_GPL(lio_get_state_string);
static char *get_oct_app_string(u32 app_mode)
{
@@ -661,6 +663,7 @@ void octeon_free_device_mem(struct octeon_device *oct)
octeon_device[i] = NULL;
octeon_device_count--;
}
+EXPORT_SYMBOL_GPL(octeon_free_device_mem);
static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
u32 priv_size)
@@ -747,6 +750,7 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+EXPORT_SYMBOL_GPL(octeon_allocate_device);
/** Register a device's bus location at initialization time.
* @param octeon_dev - pointer to the octeon device structure.
@@ -804,6 +808,7 @@ int octeon_register_device(struct octeon_device *oct,
return refcount;
}
+EXPORT_SYMBOL_GPL(octeon_register_device);
/** Deregister a device at de-initialization time.
* @param octeon_dev - pointer to the octeon device structure.
@@ -821,6 +826,7 @@ int octeon_deregister_device(struct octeon_device *oct)
return refcount;
}
+EXPORT_SYMBOL_GPL(octeon_deregister_device);
int
octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
@@ -853,12 +859,14 @@ octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_allocate_ioq_vector);
void
octeon_free_ioq_vector(struct octeon_device *oct)
{
vfree(oct->ioq_vector);
}
+EXPORT_SYMBOL_GPL(octeon_free_ioq_vector);
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
@@ -904,6 +912,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
oct->num_iqs++;
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_setup_instr_queues);
int octeon_setup_output_queues(struct octeon_device *oct)
{
@@ -940,6 +949,7 @@ int octeon_setup_output_queues(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_setup_output_queues);
int octeon_set_io_queues_off(struct octeon_device *oct)
{
@@ -989,6 +999,7 @@ int octeon_set_io_queues_off(struct octeon_device *oct)
}
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_set_io_queues_off);
void octeon_set_droq_pkt_op(struct octeon_device *oct,
u32 q_no,
@@ -1027,6 +1038,7 @@ int octeon_init_dispatch_list(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_init_dispatch_list);
void octeon_delete_dispatch_list(struct octeon_device *oct)
{
@@ -1058,6 +1070,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct)
kfree(temp);
}
}
+EXPORT_SYMBOL_GPL(octeon_delete_dispatch_list);
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
@@ -1180,6 +1193,7 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_register_dispatch_fn);
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
@@ -1203,10 +1217,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
goto core_drv_init_err;
}
- strncpy(app_name,
+ strscpy(app_name,
get_oct_app_string(
(u32)recv_pkt->rh.r_core_drv_init.app_mode),
- sizeof(app_name) - 1);
+ sizeof(app_name));
oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
oct->fw_info.max_nic_ports =
@@ -1243,9 +1257,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
memcpy(cs, get_rbd(
recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
- strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
- strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
- OCT_SERIAL_LEN);
+ strscpy(oct->boardinfo.name, cs->boardname,
+ sizeof(oct->boardinfo.name));
+ strscpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ sizeof(oct->boardinfo.serial_number));
octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
@@ -1262,6 +1277,7 @@ core_drv_init_err:
octeon_free_recv_info(recv_info);
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_core_drv_init);
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
@@ -1272,6 +1288,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
return -1;
}
+EXPORT_SYMBOL_GPL(octeon_get_tx_qsize);
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
@@ -1280,6 +1297,7 @@ int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
return oct->droq[q_no]->max_count;
return -1;
}
+EXPORT_SYMBOL_GPL(octeon_get_rx_qsize);
/* Returns the host firmware handshake OCTEON specific configuration */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
@@ -1302,6 +1320,7 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
}
return default_oct_conf;
}
+EXPORT_SYMBOL_GPL(octeon_get_conf);
/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1 0x3C0
@@ -1318,6 +1337,7 @@ struct octeon_device *lio_get_device(u32 octeon_id)
else
return octeon_device[octeon_id];
}
+EXPORT_SYMBOL_GPL(lio_get_device);
u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
@@ -1349,6 +1369,7 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
return val64;
}
+EXPORT_SYMBOL_GPL(lio_pci_readq);
void lio_pci_writeq(struct octeon_device *oct,
u64 val,
@@ -1369,6 +1390,7 @@ void lio_pci_writeq(struct octeon_device *oct,
spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
+EXPORT_SYMBOL_GPL(lio_pci_writeq);
int octeon_mem_access_ok(struct octeon_device *oct)
{
@@ -1388,6 +1410,7 @@ int octeon_mem_access_ok(struct octeon_device *oct)
return access_okay ? 0 : 1;
}
+EXPORT_SYMBOL_GPL(octeon_mem_access_ok);
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
@@ -1408,22 +1431,7 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
return ret;
}
-
-/* Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev)
-{
- struct octeon_device *octeon_dev = (struct octeon_device *)dev;
- u32 i;
-
- for (i = 0; i < MAX_OCTEON_DEVICES; i++)
- if (octeon_device[i] == octeon_dev)
- return octeon_dev->octeon_id;
- return -1;
-}
+EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init);
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
@@ -1462,3 +1470,4 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
}
}
}
+EXPORT_SYMBOL_GPL(lio_enable_irq);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index fb380b4f3e02..19344b21f8fb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
*/
struct octeon_device *lio_get_device(u32 octeon_id);
-/** Get the octeon id assigned to the octeon device passed as argument.
- * This function is exported to other modules.
- * @param dev - octeon device pointer passed as a void *.
- * @return octeon device id
- */
-int lio_get_device_id(void *dev);
-
/** Read windowed register.
* @param oct - pointer to the Octeon device.
* @param addr - Address of the register to read.
@@ -804,13 +797,6 @@ int octeon_init_consoles(struct octeon_device *oct);
int octeon_add_console(struct octeon_device *oct, u32 console_num,
char *dbg_enb);
-/** write or read from a console */
-int octeon_console_write(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 write_request_size, u32 flags);
-int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-
-int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
-
/** Removes all attached consoles. */
void octeon_remove_consoles(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index d4080bddcb6b..eef12fdd246d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -30,11 +30,6 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
-struct niclist {
- struct list_head list;
- void *ptr;
-};
-
struct __dispatch {
struct list_head list;
struct octeon_recv_info *rinfo;
@@ -107,6 +102,7 @@ u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
return last_count;
}
+EXPORT_SYMBOL_GPL(octeon_droq_check_hw_for_pkts);
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
@@ -216,6 +212,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_delete_droq);
int octeon_init_droq(struct octeon_device *oct,
u32 q_no,
@@ -773,6 +770,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_droq_process_packets);
/*
* Utility function to poll for packets. check_hw_for_packets must be
@@ -921,6 +919,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_unregister_droq_ops);
int octeon_create_droq(struct octeon_device *oct,
u32 q_no, u32 num_descs,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index c9b19e624dce..232ae72c0e37 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
void *octeon_get_dispatch_arg(struct octeon_device *oct,
u16 opcode, u16 subcode);
-void octeon_droq_print_stats(void);
-
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index bebf3bd349c6..a04f36a0e1a0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
-void octeon_dump_soft_command(struct octeon_device *oct,
- struct octeon_soft_command *sc);
-
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index d92bd7e16477..9ac85d22c615 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -57,7 +57,10 @@ union octeon_mbox_message {
} s;
};
-typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+struct octeon_mbox_cmd;
+
+typedef void (*octeon_mbox_callback_t)(struct octeon_device *,
+ struct octeon_mbox_cmd *, void *);
struct octeon_mbox_cmd {
union octeon_mbox_message msg;
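
Giving octeon_mbox_callback_t a real prototype is what allows the two (octeon_mbox_callback_t) casts earlier in this diff to disappear. Casting a function to a mismatched pointer type defeats prototype checking, and on kernels built with control-flow integrity an indirect call through the wrong type can trap; the forward declaration keeps the header self-contained. Before/after sketch:

    /* before: untyped, so every assignment needed a cast */
    typedef void (*cb_t)(void *, void *, void *);

    /* after: typed against forward-declared structs, no casts needed */
    struct octeon_device;
    struct octeon_mbox_cmd;
    typedef void (*octeon_mbox_cb_t)(struct octeon_device *,
                                     struct octeon_mbox_cmd *, void *);
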
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..953edf0c7096 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -157,7 +157,7 @@ err_release_region:
response of the request.
* 0: the request will wait until its response gets back
* from the firmware within LIO_SC_MAX_TMO_MS milli sec.
- * It the response does not return within
+ * If the response does not return within
* LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list()
* will move the request to zombie response list.
*
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 7ccab36143c1..d70132437af3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -164,6 +164,7 @@ octeon_pci_read_core_mem(struct octeon_device *oct,
{
__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}
+EXPORT_SYMBOL_GPL(octeon_pci_read_core_mem);
void
octeon_pci_write_core_mem(struct octeon_device *oct,
@@ -173,6 +174,7 @@ octeon_pci_write_core_mem(struct octeon_device *oct,
{
__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
}
+EXPORT_SYMBOL_GPL(octeon_pci_write_core_mem);
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
@@ -182,6 +184,7 @@ u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
return be64_to_cpu(ret);
}
+EXPORT_SYMBOL_GPL(octeon_read_device_mem64);
u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
@@ -191,6 +194,7 @@ u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
return be32_to_cpu(ret);
}
+EXPORT_SYMBOL_GPL(octeon_read_device_mem32);
void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
u32 val)
@@ -199,3 +203,4 @@ void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}
+EXPORT_SYMBOL_GPL(octeon_write_device_mem32);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a706f81bbb0..dee56ea740e7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -79,6 +79,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
return sc;
}
+EXPORT_SYMBOL_GPL(octeon_alloc_soft_command_resp);
int octnet_send_nic_data_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
@@ -90,6 +91,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
ndata->buf, ndata->datasize,
ndata->reqtype);
}
+EXPORT_SYMBOL_GPL(octnet_send_nic_data_pkt);
static inline struct octeon_soft_command
*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
@@ -196,3 +198,4 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
return retval;
}
+EXPORT_SYMBOL_GPL(octnet_send_nic_ctrl_pkt);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 87dd6f89ce51..c139fc423764 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -268,7 +268,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* @param oct - octeon device pointer
* @param ndata - control structure with queueing, and buffer information
*
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
@@ -278,7 +278,7 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timout, and callback info
- * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8e59c2825533..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -40,15 +40,6 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
-static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
-{
- struct octeon_instr_queue *iq =
- (struct octeon_instr_queue *)oct->instr_queue[iq_no];
- return iq->iqcmd_64B;
-}
-
-#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
-
/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
@@ -135,13 +126,13 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!oct->check_db_wq[iq_no].wq) {
vfree(iq->request_list);
@@ -194,6 +185,7 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
}
return 1;
}
+EXPORT_SYMBOL_GPL(octeon_delete_instr_queue);
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
@@ -267,6 +259,7 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
return instr_cnt;
}
+EXPORT_SYMBOL_GPL(lio_wait_for_instr_fetch);
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
@@ -291,6 +284,7 @@ octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
ring_doorbell(oct, iq);
spin_unlock(&iq->post_lock);
}
+EXPORT_SYMBOL_GPL(octeon_ring_doorbell_locked);
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
u8 *cmd)
@@ -354,6 +348,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_register_reqtype_free_fn);
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
@@ -439,6 +434,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
return inst_count;
}
+EXPORT_SYMBOL_GPL(lio_process_iq_request_list);
/* Can only be called from process context */
int
@@ -575,6 +571,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
return st.status;
}
+EXPORT_SYMBOL_GPL(octeon_send_command);
void
octeon_prepare_soft_command(struct octeon_device *oct,
@@ -682,6 +679,7 @@ octeon_prepare_soft_command(struct octeon_device *oct,
}
}
}
+EXPORT_SYMBOL_GPL(octeon_prepare_soft_command);
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
@@ -735,6 +733,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
len, REQTYPE_SOFT_COMMAND));
}
+EXPORT_SYMBOL_GPL(octeon_send_soft_command);
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
@@ -764,6 +763,7 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_setup_sc_buffer_pool);
int octeon_free_sc_done_list(struct octeon_device *oct)
{
@@ -803,6 +803,7 @@ int octeon_free_sc_done_list(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_free_sc_done_list);
int octeon_free_sc_zombie_list(struct octeon_device *oct)
{
@@ -827,6 +828,7 @@ int octeon_free_sc_zombie_list(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_free_sc_zombie_list);
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
@@ -851,6 +853,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
return 0;
}
+EXPORT_SYMBOL_GPL(octeon_free_sc_buffer_pool);
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
u32 datasize,
@@ -922,6 +925,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
return sc;
}
+EXPORT_SYMBOL_GPL(octeon_alloc_soft_command);
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
@@ -934,3 +938,4 @@ void octeon_free_soft_command(struct octeon_device *oct,
spin_unlock_bh(&oct->sc_buf_pool.lock);
}
+EXPORT_SYMBOL_GPL(octeon_free_soft_command);
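
The (u64) cast added in octeon_init_instr_queue() is a correctness fix, not style: the comparison (conf->instr_type == 64) yields a 32-bit int, so shifting it left by an iq_no of 32 or more is undefined behaviour and would drop the bit from the 64-bit iq64B mask. Sketch:

    u64 mask = 0;
    u32 iq_no = 40;

    /* undefined: an int shifted by >= 32 bits */
    /* mask |= (1 << iq_no); */

    /* well-defined: widen to 64 bits before shifting */
    mask |= ((u64)1 << iq_no);
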
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index ac7747ccf56a..de1a8335b545 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
}
spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
@@ -52,12 +53,14 @@ int octeon_setup_response_list(struct octeon_device *oct)
return ret;
}
+EXPORT_SYMBOL_GPL(octeon_setup_response_list);
void octeon_delete_response_list(struct octeon_device *oct)
{
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
destroy_workqueue(oct->dma_comp_wq.wq);
}
+EXPORT_SYMBOL_GPL(octeon_delete_response_list);
int lio_process_ordered_list(struct octeon_device *octeon_dev,
u32 force_quit)
@@ -219,6 +222,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
return 0;
}
+EXPORT_SYMBOL_GPL(lio_process_ordered_list);
static void oct_poll_req_completion(struct work_struct *work)
{
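
Every alloc_workqueue() call in this diff gains WQ_PERCPU alongside WQ_MEM_RECLAIM. As I read the ongoing workqueue rework (an assumption worth verifying), callers are now expected to state placement explicitly, WQ_PERCPU versus WQ_UNBOUND, instead of relying on the historical per-CPU default, and the flag simply preserves the old behaviour. A minimal sketch under that assumption:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int example_init(void)
    {
            /* explicit placement: per-CPU queue, usable on
             * memory-reclaim paths
             */
            example_wq = alloc_workqueue("example-wq",
                                         WQ_MEM_RECLAIM | WQ_PERCPU, 0);
            return example_wq ? 0 : -ENOMEM;
    }
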
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4e39d712e121..c190fc6538d4 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state {
};
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
- unsigned char *addr)
+ const unsigned char *addr)
{
int i;
@@ -649,7 +649,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/* HW lifts the limit if the frame is VLAN tagged
* (+4 bytes per each tag, up to two tags)
@@ -690,22 +690,16 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
return IRQ_HANDLED;
}
-static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- struct hwtstamp_config config;
- union cvmx_mio_ptp_clock_cfg ptp;
union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mio_ptp_clock_cfg ptp;
bool have_hw_timestamps = false;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
- /* Check the status of hardware for tiemstamps */
+ /* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
@@ -736,10 +730,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
have_hw_timestamps = true;
}
- if (!have_hw_timestamps)
+ if (!have_hw_timestamps) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support timestamping");
return -EINVAL;
+ }
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -747,7 +743,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
p->has_rx_tstamp = false;
rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
@@ -769,33 +765,34 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- p->has_rx_tstamp = have_hw_timestamps;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- if (p->has_rx_tstamp) {
- rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
- rxx_frm_ctl.s.ptp_mode = 1;
- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
- }
+ p->has_rx_tstamp = true;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
break;
default:
return -ERANGE;
}
- if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int octeon_mgmt_ioctl(struct net_device *netdev,
- struct ifreq *rq, int cmd)
+static int octeon_mgmt_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
- default:
- return phy_do_ioctl(netdev, rq, cmd);
- }
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ /* Check the status of hardware for timestamps */
+ if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ return -EINVAL;
+
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = p->has_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
@@ -1345,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1373,11 +1370,13 @@ static const struct net_device_ops octeon_mgmt_ops = {
.ndo_start_xmit = octeon_mgmt_xmit,
.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
.ndo_set_mac_address = octeon_mgmt_set_mac_address,
- .ndo_eth_ioctl = octeon_mgmt_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
+ .ndo_hwtstamp_get = octeon_mgmt_hwtstamp_get,
+ .ndo_hwtstamp_set = octeon_mgmt_hwtstamp_set,
};
static int octeon_mgmt_probe(struct platform_device *pdev)
@@ -1399,8 +1398,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
@@ -1524,7 +1523,7 @@ err:
return result;
}
-static int octeon_mgmt_remove(struct platform_device *pdev)
+static void octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct octeon_mgmt *p = netdev_priv(netdev);
@@ -1532,7 +1531,6 @@ static int octeon_mgmt_remove(struct platform_device *pdev)
unregister_netdev(netdev);
of_node_put(p->phy_np);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id octeon_mgmt_match[] = {
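
The octeon_mgmt hunks above migrate hardware timestamping from the legacy SIOCSHWTSTAMP ioctl to the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks; the copy_from_user()/copy_to_user() calls and the config.flags check disappear because the core now handles the ioctl plumbing and flag validation before calling the driver. A driver-agnostic sketch of the new shape (the foo_* names are hypothetical):

#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

static int foo_hwtstamp_set(struct net_device *ndev,
                            struct kernel_hwtstamp_config *cfg,
                            struct netlink_ext_ack *extack)
{
        if (cfg->tx_type != HWTSTAMP_TX_OFF &&
            cfg->tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        /* program the hardware, then report back what was enabled */
        if (cfg->rx_filter != HWTSTAMP_FILTER_NONE)
                cfg->rx_filter = HWTSTAMP_FILTER_ALL;
        return 0;
}

static int foo_hwtstamp_get(struct net_device *ndev,
                            struct kernel_hwtstamp_config *cfg)
{
        cfg->tx_type = HWTSTAMP_TX_ON;
        cfg->rx_filter = HWTSTAMP_FILTER_NONE;  /* mirror device state */
        return 0;
}
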
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index f2f1ce81fd9c..0ec65ec634df 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -59,7 +59,7 @@ struct nicpf {
/* MSI-X */
u8 num_vec;
- bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf;
- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
+ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
mbx = 0;
else
mbx = 1;
@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic)
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(pci_irq_vector(nic->pdev, irq), nic);
- nic->irq_allocated[irq] = false;
+ free_irq(nic->irq_allocated[irq], nic);
+ nic->irq_allocated[irq] = 0;
}
}
static int nic_register_interrupts(struct nicpf *nic)
{
- int i, ret;
+ int i, ret, irq;
nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic)
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(pci_irq_vector(nic->pdev, i),
- nic_mbx_intr_handler, 0,
+ irq = pci_irq_vector(nic->pdev, i);
+ ret = request_irq(irq, nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
goto fail;
- nic->irq_allocated[i] = true;
+ nic->irq_allocated[i] = irq;
}
/* Enable mailbox interrupt */
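
The nic_main.c change above stops recomputing pci_irq_vector() when freeing and instead caches the Linux IRQ number obtained at request time, with 0 doubling as "not allocated". A sketch of the pattern with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static int foo_request_vec(struct pci_dev *pdev, int i,
                           irq_handler_t handler, void *ctx,
                           unsigned int *cached_irq)
{
        int irq = pci_irq_vector(pdev, i);
        int ret;

        if (irq < 0)
                return irq;
        ret = request_irq(irq, handler, 0, "foo", ctx);
        if (ret)
                return ret;
        *cached_irq = irq;      /* reuse for free_irq(); 0 = unused */
        return 0;
}
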
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 7f2882109b16..413028bdcacb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
@@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev,
}
static void nicvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev,
}
static int nicvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -512,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -537,36 +541,29 @@ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 nicvf_get_rx_ring_count(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nic->rx_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
- default:
- break;
- }
- return ret;
+ return nic->rx_queues;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -624,19 +621,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -649,35 +633,36 @@ static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
return nic->rss_info.rss_size;
}
-static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int nicvf_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
+ rxfh->indir[idx] = rss->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int nicvf_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!rss->enable) {
@@ -686,13 +671,13 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- if (hkey) {
- memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
}
@@ -731,12 +716,17 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;
- if (nic->xdp_prog &&
- ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
- netdev_err(nic->netdev,
- "XDP mode, RXQs + TXQs > Max %d\n",
- nic->max_queues);
- return -EINVAL;
+ if (channel->tx_count + channel->rx_count > nic->max_queues) {
+ if (nic->xdp_prog) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
+ xdp_clear_features_flag(nic->netdev);
+ } else if (!pass1_silicon(nic->pdev)) {
+ xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC);
}
if (if_up)
@@ -826,7 +816,7 @@ static int nicvf_set_pauseparam(struct net_device *dev,
}
static int nicvf_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -834,8 +824,6 @@ static int nicvf_get_ts_info(struct net_device *netdev,
return ethtool_op_get_ts_info(netdev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -863,12 +851,13 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
- .get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
+ .get_rx_ring_count = nicvf_get_rx_ring_count,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
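
The ethtool conversion above splits the old multiplexed get_rxnfc()/set_rxnfc() commands into purpose-built operations (get_rx_ring_count plus get/set_rxfh_fields) and bundles the RSS indirection table, key and hash function into struct ethtool_rxfh_param. A sketch of the new get_rxfh shape with placeholder device state (foo_* is hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_rxfh(struct net_device *dev,
                        struct ethtool_rxfh_param *rxfh)
{
        /* indir, key and hfunc now arrive in a single struct; any
         * pointer may be NULL when the caller does not want it.
         */
        if (rxfh->indir)
                rxfh->indir[0] = 0;     /* fill from device state */
        if (rxfh->key)
                rxfh->key[0] = 0;       /* fill from device state */
        rxfh->hfunc = ETH_RSS_HASH_TOP;
        return 0;
}
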
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb45d5df2856..0b6e30a8feb0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -590,7 +590,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
default:
- bpf_warn_invalid_xdp_action(action);
+ bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
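
netif_napi_add() lost its weight parameter; callers that passed the default NAPI_POLL_WEIGHT, as nicvf did here, simply drop it, while non-default weights move to netif_napi_add_weight(), as octeon_mgmt did earlier in this series. A one-line sketch:

#include <linux/netdevice.h>

static void foo_register_napi(struct net_device *ndev,
                              struct napi_struct *napi,
                              int (*poll)(struct napi_struct *, int))
{
        netif_napi_add(ndev, napi, poll);       /* weight defaults to 64 */
}
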
@@ -1579,7 +1578,6 @@ napi_del:
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
- int orig_mtu = netdev->mtu;
/* For now just support only the usual MTU sized frames,
* plus some headroom for VLAN, QinQ.
@@ -1590,15 +1588,10 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
-
- if (!netif_running(netdev))
- return 0;
-
- if (nicvf_update_hw_max_frs(nic, new_mtu)) {
- netdev->mtu = orig_mtu;
+ if (netif_running(netdev) && nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- }
+
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
@@ -1906,22 +1899,18 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
-static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+static int nicvf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct nicvf *nic = netdev_priv(netdev);
- if (!nic->ptp_clock)
+ if (!nic->ptp_clock) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -ENODEV;
+ }
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
@@ -1929,7 +1918,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
nic->hw_rx_tstamp = false;
break;
@@ -1948,7 +1937,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
nic->hw_rx_tstamp = true;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
@@ -1957,20 +1946,24 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (netif_running(netdev))
nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
return 0;
}
-static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+static int nicvf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return nicvf_config_hwtstamp(netdev, req);
- default:
- return -EOPNOTSUPP;
- }
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ /* TX timestamping is technically always on */
+ config->tx_type = HWTSTAMP_TX_ON;
+ config->rx_filter = nic->hw_rx_tstamp ?
+ HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
@@ -2028,9 +2021,6 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
u8 mode;
struct xcast_addr_list *mc;
- if (!vf_work)
- return;
-
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
@@ -2095,8 +2085,9 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_eth_ioctl = nicvf_ioctl,
.ndo_set_rx_mode = nicvf_set_rx_mode,
+ .ndo_hwtstamp_get = nicvf_hwtstamp_get,
+ .ndo_hwtstamp_set = nicvf_hwtstamp_set,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2226,6 +2217,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ if (!pass1_silicon(nic->pdev) &&
+ nic->rx_queues + nic->tx_queues <= nic->max_queues)
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
@@ -2247,7 +2242,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_unregister_interrupts;
+ goto err_destroy_workqueue;
}
nic->msg_enable = debug;
@@ -2256,6 +2251,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_destroy_workqueue:
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:
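
The err_destroy_workqueue label added above fixes an unwind-ordering bug: a register_netdev() failure previously jumped past the teardown of the rx-mode workqueue created earlier in probe, leaking it. The general rule, sketched with stub helpers (all foo_* names hypothetical), is to release resources strictly in reverse order of acquisition:

static int foo_setup_irqs(void)      { return 0; }
static int foo_create_wq(void)       { return 0; }
static int foo_register_netdev(void) { return 0; }
static void foo_destroy_wq(void)     { }
static void foo_free_irqs(void)      { }

static int foo_probe(void)
{
        int err;

        err = foo_setup_irqs();
        if (err)
                return err;
        err = foo_create_wq();
        if (err)
                goto err_free_irqs;
        err = foo_register_netdev();
        if (err)
                goto err_destroy_wq;    /* newest resource first */
        return 0;

err_destroy_wq:
        foo_destroy_wq();
err_free_irqs:
        foo_free_irqs();
        return err;
}
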
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 50bbe79fb93d..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <uapi/linux/bpf.h>
#include "nic_reg.h"
#include "nic.h"
@@ -1260,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1381,18 +1382,16 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1471,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
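
The nicvf_queues.c hunks above swap the open-coded transport-offset-plus-TCP-header sum for the skb_tcp_all_headers() helper and move skb_tx_timestamp() next to the doorbell, so the software timestamp is taken once on the path that actually transmits rather than in the header-building code. The helper is pure shorthand, as this sketch shows:

#include <linux/tcp.h>

static unsigned int foo_tcp_all_headers(const struct sk_buff *skb)
{
        /* identical to skb_tcp_all_headers(skb) */
        return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
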
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..b7531041c56d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
-void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
-u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574a32f23f96..9efb60842ad1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -54,7 +54,7 @@ struct lmac {
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
- struct net_device netdev;
+ struct net_device *netdev;
struct phy_device *phydev;
unsigned int last_duplex;
unsigned int last_link;
@@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
- struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
+ struct lmac *lmac, **priv;
int link_changed = 0;
+ priv = netdev_priv(netdev);
+ lmac = *priv;
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
@@ -1116,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
lmac->phydev->dev_flags = 0;
- if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ if (phy_connect_direct(lmac->netdev, lmac->phydev,
bgx_lmac_handler,
phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
@@ -1126,8 +1128,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
poll:
- lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
+ lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
if (!lmac->check_link)
return -ENOMEM;
INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
@@ -1409,12 +1410,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+ SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
@@ -1427,16 +1429,18 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
{
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
struct bgx *bgx = context;
- char bgx_sel[5];
+ char bgx_sel[7];
- snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ snprintf(bgx_sel, sizeof(bgx_sel), "BGX%d", bgx->bgx_id);
if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
pr_warn("Invalid link device\n");
return AE_OK;
}
- if (strncmp(string.pointer, bgx_sel, 4))
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
return AE_OK;
+ }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);
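
The kfree() added in bgx_acpi_match_id() above plugs a leak: acpi_get_name() with ACPI_ALLOCATE_BUFFER kmalloc()s string.pointer, so every path after a successful call must free it, including the early "name does not match" return. A sketch of the rule (foo_name_matches is hypothetical):

#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/string.h>

static bool foo_name_matches(acpi_handle handle, const char *want)
{
        struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
        bool match = false;

        if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
                match = !strncmp(string.pointer, want, strlen(want));
                kfree(string.pointer);  /* owned by us on success */
        }
        return match;
}
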
@@ -1481,7 +1485,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
of_get_mac_address(node, bgx->lmac[lmac].mac);
- SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
phy_np = of_parse_phandle(node, "phy-handle", 0);
@@ -1489,13 +1493,17 @@ static int bgx_init_of_phy(struct bgx *bgx)
* this cortina phy, for which there is no driver
* support, ignore it.
*/
- if (phy_np &&
- !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
- /* Wait until the phy drivers are available */
- pd = of_phy_find_device(phy_np);
- if (!pd)
- goto defer;
- bgx->lmac[lmac].phydev = pd;
+ if (phy_np) {
+ if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd) {
+ of_node_put(phy_np);
+ goto defer;
+ }
+ bgx->lmac[lmac].phydev = pd;
+ }
+ of_node_put(phy_np);
}
lmac++;
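
The of_node_put() calls added above follow the OF reference-counting rule: of_parse_phandle() returns a node with an elevated refcount that the caller owns and must drop on every exit path. Minimal pattern (the property name is the one this driver uses; foo_* is hypothetical):

#include <linux/of.h>

static void foo_use_phy_handle(struct device_node *np)
{
        struct device_node *phy_np = of_parse_phandle(np, "phy-handle", 0);

        if (!phy_np)
                return;
        /* ... look up and use the PHY ... */
        of_node_put(phy_np);    /* drop the reference in every path */
}
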
@@ -1511,11 +1519,11 @@ defer:
* for phy devices we may have already found.
*/
while (lmac) {
+ lmac--;
if (bgx->lmac[lmac].phydev) {
put_device(&bgx->lmac[lmac].phydev->mdio.dev);
bgx->lmac[lmac].phydev = NULL;
}
- lmac--;
}
of_node_put(node);
return -EPROBE_DEFER;
@@ -1601,10 +1609,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1612,7 +1620,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1642,6 +1650,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *lmacp, **priv;
+
+ lmacp = &bgx->lmac[lmac];
+ lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+ if (!lmacp->netdev) {
+ for (int i = 0; i < lmac; i++)
+ free_netdev(bgx->lmac[i].netdev);
+ err = -ENOMEM;
+ goto err_enable;
+ }
+
+ priv = netdev_priv(lmacp->netdev);
+ *priv = lmacp;
+ }
+
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@@ -1667,10 +1692,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1681,14 +1703,14 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
+ free_netdev(bgx->lmac[lmac].netdev);
+ }
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
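
The bgx changes above remove an embedded struct net_device from struct lmac: net_device is refcounted and must come from the netdev allocators, so the driver now creates a dummy netdev per LMAC and stores a back-pointer to the owner in its priv area (recovered via netdev_priv() in bgx_lmac_handler). A sketch of the pattern, assuming alloc_netdev_dummy() is available (struct foo stands in for struct lmac):

#include <linux/errno.h>
#include <linux/netdevice.h>

struct foo {
        struct net_device *netdev;
};

static int foo_add_dummy_netdev(struct foo *f)
{
        struct foo **priv;

        f->netdev = alloc_netdev_dummy(sizeof(struct foo *));
        if (!f->netdev)
                return -ENOMEM;
        priv = netdev_priv(f->netdev);
        *priv = f;              /* owner recovered via *netdev_priv() */
        return 0;               /* release later with free_netdev() */
}
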
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..84f16ababaee 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -219,9 +219,7 @@
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
-void octeon_mdiobus_force_mod_depencency(void);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
-void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 0321be77366c..304bb282ab03 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: common.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -338,8 +329,6 @@ irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
-const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
- unsigned short ssid);
int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index bf43da6c6a63..12639b688ddc 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cphy.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index 5249686afe71..a30fb407115d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cpl5_cmd.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -635,4 +626,3 @@ struct cpl_mss_change {
};
#endif /* _CXGB_CPL5_CMD_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 609820e214a3..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -840,7 +844,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -940,11 +944,11 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
+ int i, err;
err = pci_enable_device(pdev);
if (err)
@@ -957,17 +961,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
- pci_name(pdev));
- err = -ENODEV;
- goto out_disable_pdev;
- }
-
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1039,10 +1034,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ netdev->lltx = true;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
netdev->features |=
NETIF_F_HW_VLAN_CTAG_TX |
@@ -1060,7 +1054,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
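
The cxgb2 probe above collapses the 64-then-32-bit DMA mask dance into one dma_set_mask_and_coherent() call; on current kernels a rejected 64-bit mask means no narrower mask would succeed either, so the fallback (and the pci_using_dac bookkeeping behind conditional NETIF_F_HIGHDMA) is dead code. Sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_set_dma_masks(struct pci_dev *pdev)
{
        /* sets both streaming and coherent masks; a negative return
         * means no usable DMA configuration exists at all
         */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}
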
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index 81526ad36339..0427e894c277 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: elmer0.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -154,4 +145,3 @@ enum {
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 3e182eee799e..ef70569435be 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: espi.c *
@@ -7,16 +8,6 @@
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 162de5259df9..f588e9f3b37a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: espi.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index 5913eaf442b5..96077da1ed5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: gmac.h *
@@ -7,16 +8,6 @@
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index 7ddb301bcba0..556c8ad68fa8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: mv88x201x.c *
@@ -7,16 +8,6 @@
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 0bb37e4680c7..f3ada6e7cdc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: pm3393.c *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -150,7 +141,7 @@ static int pm3393_interrupt_enable(struct cmac *cmac)
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
@@ -188,7 +179,7 @@ static int pm3393_interrupt_disable(struct cmac *cmac)
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT */
+ /* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
@@ -231,7 +222,7 @@ static int pm3393_interrupt_clear(struct cmac *cmac)
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
- /* TERMINATOR - PL_INTERUPTS_EXT
+ /* TERMINATOR - PL_INTERRUPTS_EXT
*/
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
@@ -765,7 +756,7 @@ static int pm3393_mac_reset(adapter_t * adapter)
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
- /* Has the XAUI MABC PLL circuitry stablized? */
+ /* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index 964ce59ee169..f751e680cf7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: regs.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index cda01f22c71c..5f354cf62cdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: sge.c *
@@ -7,16 +8,6 @@
* DMA engine. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1359,7 +1350,7 @@ static void restart_sched(struct tasklet_struct *t)
* @fl: the free list that contains the packet buffer
* @len: the packet length
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
*/
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
@@ -1931,7 +1922,7 @@ send:
static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
+ struct sge *sge = timer_container_of(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -1993,9 +1984,9 @@ void t1_sge_stop(struct sge *sge)
readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
if (is_T2(sge->adapter))
- del_timer_sync(&sge->espibug_timer);
+ timer_delete_sync(&sge->espibug_timer);
- del_timer_sync(&sge->tx_reclaim_timer);
+ timer_delete_sync(&sge->tx_reclaim_timer);
if (sge->tx_sched)
tx_sched_stop(sge);
@@ -2026,7 +2017,7 @@ void t1_sge_start(struct sge *sge)
*/
static void espibug_workaround_t204(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2069,7 +2060,7 @@ static void espibug_workaround_t204(struct timer_list *t)
static void espibug_workaround(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
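
The sge.c hunks above are mechanical renames: from_timer() became timer_container_of() and del_timer_sync() became timer_delete_sync(), with unchanged semantics. A hypothetical use of both:

#include <linux/timer.h>

struct foo_dev {
        struct timer_list reclaim_timer;
};

static void foo_reclaim_cb(struct timer_list *t)
{
        struct foo_dev *fd = timer_container_of(fd, t, reclaim_timer);

        (void)fd;               /* reclaim resources belonging to fd */
}

static void foo_stop(struct foo_dev *fd)
{
        timer_delete_sync(&fd->reclaim_timer);  /* waits for the callback */
}
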
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index 716705b96f26..f7e6f64040ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: sge.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 007c591b8bf5..367a9e4581d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: subr.c *
@@ -7,16 +8,6 @@
* Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index 7f79cc7ceb75..4c883170683b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: suni1x10gexp_regs.h *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1639,4 +1630,3 @@
#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index ba15675d56df..64f93dcc676b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -65,9 +65,7 @@ void t1_tp_intr_enable(struct petp *tp);
void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
-void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
-int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 6d682b7c7aac..9d11e55981a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -237,7 +237,7 @@ struct adapter {
int msix_nvectors;
struct {
unsigned short vec;
- char desc[22];
+ char desc[IFNAMSIZ + 1 + 12]; /* Needs space for "%s-%d" */
} msix_info[SGE_QSETS + 1];
/* T3 modules */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index a309016f7f8c..ecd025dda8d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -676,8 +676,6 @@ void t3_link_changed(struct adapter *adapter, int port_id);
void t3_link_fault(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_get_tp_version(struct adapter *adapter, u32 *vers);
int t3_check_tpsram_version(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index f04e81f33795..a08fc762a438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -106,6 +106,4 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
return &e->t3c_tid;
}
-int attach_t3cdev(struct t3cdev *dev);
-void detach_t3cdev(struct t3cdev *dev);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 9cf9e33664e4..3b1321c8ed14 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -380,19 +380,18 @@ static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
*/
static void name_msix_vecs(struct adapter *adap)
{
- int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+ int i, j, msi_idx = 1;
- snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
- adap->msix_info[0].desc[n] = 0;
+ strscpy(adap->msix_info[0].desc, adap->name, sizeof(adap->msix_info[0].desc));
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++) {
- snprintf(adap->msix_info[msi_idx].desc, n,
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[0].desc),
"%s-%d", d->name, pi->first_qset + i);
- adap->msix_info[msi_idx].desc[n] = 0;
}
}
}
@@ -609,8 +608,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1302,6 +1300,7 @@ static int cxgb_up(struct adapter *adap)
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
@@ -1627,8 +1626,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
@@ -1948,7 +1947,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1964,7 +1965,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = q->txq_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2036,20 +2039,16 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int i, err = 0;
-
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ int cnt;
e->magic = EEPROM_MAGIC;
- for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
- err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
+ cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
+ if (cnt < 0)
+ return cnt;
- if (!err)
- memcpy(data, buf + e->offset, e->len);
- kfree(buf);
- return err;
+ e->len = cnt;
+
+ return 0;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -2058,7 +2057,6 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
u32 aligned_offset, aligned_len;
- __le32 *p;
u8 *buf;
int err;
@@ -2072,12 +2070,9 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
buf = kmalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
- if (!err && aligned_len > 4)
- err = t3_seeprom_read(adapter,
- aligned_offset + aligned_len - 4,
- (__le32 *) & buf[aligned_len - 4]);
- if (err)
+ err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
+ buf);
+ if (err < 0)
goto out;
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
} else
@@ -2087,17 +2082,13 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (err)
goto out;
- for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
- err = t3_seeprom_write(adapter, aligned_offset, *p);
- aligned_offset += 4;
- }
-
- if (!err)
+ err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
+ if (err >= 0)
err = t3_seeprom_wp(adapter, 1);
out:
if (buf != data)
kfree(buf);
- return err;
+ return err < 0 ? err : 0;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
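Both eeprom paths now go through the PCI core's VPD accessors, whose return convention differs from the old helpers: pci_read_vpd()/pci_write_vpd() return the number of bytes transferred (possibly fewer than requested) or a negative errno, never 0-on-success. Callers that must report 0-or-errno squash the positive case, as in this sketch:

        ssize_t cnt;

        cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
        if (cnt < 0)
                return cnt;             /* errno */
        e->len = cnt;                   /* report what was actually read */
        return 0;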
@@ -2134,7 +2125,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_link_ksettings = set_link_ksettings,
};
-static int in_range(int val, int lo, int hi)
+static int cxgb_in_range(int val, int lo, int hi)
{
return val < 0 || (val <= hi && val >= lo);
}
@@ -2170,19 +2161,19 @@ static int cxgb_siocdevprivate(struct net_device *dev,
return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
- if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
- !in_range(t.cong_thres, 0, 255) ||
- !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
+ if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) ||
+ !cxgb_in_range(t.cong_thres, 0, 255) ||
+ !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
MAX_TXQ_ENTRIES) ||
- !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
+ !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
MAX_TXQ_ENTRIES) ||
- !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+ !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
MAX_CTRL_TXQ_ENTRIES) ||
- !in_range(t.fl_size[0], MIN_FL_ENTRIES,
+ !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES,
MAX_RX_BUFFERS) ||
- !in_range(t.fl_size[1], MIN_FL_ENTRIES,
+ !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES,
MAX_RX_JUMBO_BUFFERS) ||
- !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
+ !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
MAX_RSPQ_ENTRIES))
return -EINVAL;
@@ -2568,7 +2559,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
@@ -2942,7 +2933,6 @@ static int t3_reenable_adapter(struct adapter *adapter)
}
pci_set_master(adapter->pdev);
pci_restore_state(adapter->pdev);
- pci_save_state(adapter->pdev);
/* Free sge resources */
t3_free_sge_resources(adapter);
@@ -3212,7 +3202,7 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
+ int i, err;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
@@ -3239,9 +3229,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_release_regions;
}
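The deleted fallback was dead code: a failure of dma_set_mask_and_coherent() with a 64-bit mask means the platform cannot provide usable DMA for this device at all, so retrying with 32 bits cannot help; device addressing limits are handled inside the DMA API. That is also why NETIF_F_HIGHDMA is advertised unconditionally in the next hunk. The combined shape:

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_release_regions;
        }
        /* no pci_using_dac bookkeeping needed any more */
        netdev->features |= NETIF_F_HIGHDMA;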
@@ -3317,8 +3306,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
netdev->ethtool_ops = &cxgb_ethtool_ops;
@@ -3358,6 +3347,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
goto out_free_dev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..5a9f6925e1fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
@@ -515,23 +515,6 @@ void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
EXPORT_SYMBOL(cxgb3_free_atid);
-/*
- * Free a server TID and return it to the free pool.
- */
-void cxgb3_free_stid(struct t3cdev *tdev, int stid)
-{
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
- union listen_entry *p = stid2entry(t, stid);
-
- spin_lock_bh(&t->stid_lock);
- p->next = t->sfree;
- t->sfree = p;
- t->stids_in_use--;
- spin_unlock_bh(&t->stid_lock);
-}
-
-EXPORT_SYMBOL(cxgb3_free_stid);
-
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
void *ctx, unsigned int tid)
{
@@ -671,28 +654,6 @@ int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
EXPORT_SYMBOL(cxgb3_alloc_atid);
-int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
- void *ctx)
-{
- int stid = -1;
- struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
-
- spin_lock_bh(&t->stid_lock);
- if (t->sfree) {
- union listen_entry *p = t->sfree;
-
- stid = (p - t->stid_tab) + t->stid_base;
- t->sfree = p->next;
- p->t3c_tid.ctx = ctx;
- p->t3c_tid.client = client;
- t->stids_in_use++;
- }
- spin_unlock_bh(&t->stid_lock);
- return stid;
-}
-
-EXPORT_SYMBOL(cxgb3_alloc_stid);
-
/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
index 929c298115ca..7419824f9926 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h
@@ -95,10 +95,7 @@ struct cxgb3_client {
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
-int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
- void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
-void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index ea75f275023f..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -76,7 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct rcu_head rcu_head; /* to handle rcu cleanup */
- struct l2t_entry l2tab[];
+ struct l2t_entry l2tab[] __counted_by(nentries);
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
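__counted_by(nentries) ties the flexible array to the struct's own element count so CONFIG_UBSAN_BOUNDS and FORTIFY_SOURCE can bounds-check l2tab[] accesses at run time. The one rule is that the counter must be valid before the array is touched; a minimal allocation sketch (allocator call illustrative):

        struct l2t_data *d;

        d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
        if (!d)
                return NULL;
        d->nentries = l2t_capacity;     /* set before any l2tab[i] access */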
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index c3afec1041f8..b59735d0e065 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
};
/*
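struct_group() wraps the two adjacent members in an anonymous union, so the 48-byte bulk copy in get_imm_packet() addresses one properly sized object instead of appearing to overrun imm_data[47] under FORTIFY_SOURCE; both views stay usable. Schematically:

        /* group view: one bounded object for the bulk copy */
        skb_copy_to_linear_data(skb, &resp->immediate,
                                sizeof(resp->immediate));

        /* member view: fields remain individually addressable */
        u8 gen = resp->intr_gen;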
@@ -164,11 +166,6 @@ static u8 flit_desc_map[] = {
#endif
};
-static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-{
- return container_of(q, struct sge_qset, fl[qidx]);
-}
-
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
return container_of(q, struct sge_qset, rspq);
@@ -925,7 +922,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
}
return skb;
}
@@ -1953,7 +1951,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
* @rx_gather: a gather list of packets if we are building a bundle
* @gather_idx: index of the next available slot in the bundle
*
- * Process an ingress offload pakcet and add it to the offload ingress
+ * Process an ingress offload packet and add it to the offload ingress
* queue. Returns the index of the next available slot in the bundle.
*/
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
@@ -2079,7 +2077,7 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @pad: padding
* @lro: large receive offload
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
* if it was immediate data in a response.
*/
@@ -2186,9 +2184,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
len -= offset;
rx_frag += nr_frags;
- __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
- skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
- skb_frag_size_set(rx_frag, len);
+ skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
+ sd->pg_chunk.offset + offset, len);
skb->len += len;
skb->data_len += len;
@@ -2504,14 +2501,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct napi_struct *napi)
-{
- return test_bit(NAPI_STATE_SCHED, &napi->state);
-}
-
/**
* process_pure_responses - process pure responses from a response queue
* @adap: the adapter
@@ -2677,12 +2666,7 @@ static int rspq_check_napi(struct sge_qset *qs)
{
struct sge_rspq *q = &qs->rspq;
- if (!napi_is_scheduled(&qs->napi) &&
- is_new_response(&q->desc[q->cidx], q)) {
- napi_schedule(&qs->napi);
- return 1;
- }
- return 0;
+ return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi);
}
/*
@@ -2922,7 +2906,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
*/
static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2963,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
@@ -3239,9 +3223,9 @@ void t3_stop_sge_timers(struct adapter *adap)
struct sge_qset *q = &adap->sge.qs[i];
if (q->tx_reclaim_timer.function)
- del_timer_sync(&q->tx_reclaim_timer);
+ timer_delete_sync(&q->tx_reclaim_timer);
if (q->rx_reclaim_timer.function)
- del_timer_sync(&q->rx_reclaim_timer);
+ timer_delete_sync(&q->rx_reclaim_timer);
}
}
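timer_container_of() is the successor name for from_timer() (identical container_of() semantics), and timer_delete_sync() likewise supersedes del_timer_sync(); both conversions are mechanical. The callback pattern, sketched:

        static void sge_timer_tx(struct timer_list *t)
        {
                /* recover the object embedding tx_reclaim_timer */
                struct sge_qset *qs = timer_container_of(qs, t,
                                                         tx_reclaim_timer);

                mod_timer(&qs->tx_reclaim_timer, jiffies + HZ / 4);
        }

        /* elsewhere, at teardown: returns only once any
         * still-running handler has finished
         */
        timer_delete_sync(&qs->tx_reclaim_timer);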
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 53feac8da503..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -596,81 +596,10 @@ struct t3_vpd {
u32 pad; /* for multiple-of-4 sizing and alignment */
};
-#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00
/**
- * t3_seeprom_read - read a VPD EEPROM location
- * @adapter: adapter to read
- * @addr: EEPROM address
- * @data: where to store the read data
- *
- * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
- * VPD ROM capability. A zero is written to the flag bit when the
- * address is written to the control register. The hardware device will
- * set the flag to 1 when 4 bytes have been read into the data register.
- */
-int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- u32 v;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
- do {
- udelay(10);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while (!(val & PCI_VPD_ADDR_F) && --attempts);
-
- if (!(val & PCI_VPD_ADDR_F)) {
- CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
- *data = cpu_to_le32(v);
- return 0;
-}
-
-/**
- * t3_seeprom_write - write a VPD EEPROM location
- * @adapter: adapter to write
- * @addr: EEPROM address
- * @data: value to write
- *
- * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
- * VPD ROM capability.
- */
-int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
-{
- u16 val;
- int attempts = EEPROM_MAX_POLL;
- unsigned int base = adapter->params.pci.vpd_cap_addr;
-
- if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
- return -EINVAL;
-
- pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
- le32_to_cpu(data));
- pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
- addr | PCI_VPD_ADDR_F);
- do {
- msleep(1);
- pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
- } while ((val & PCI_VPD_ADDR_F) && --attempts);
-
- if (val & PCI_VPD_ADDR_F) {
- CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
- return -EIO;
- }
- return 0;
-}
-
-/**
* t3_seeprom_wp - enable/disable EEPROM write protection
* @adapter: the adapter
* @enable: 1 to enable write protection, 0 to disable it
@@ -679,7 +608,14 @@ int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
*/
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
- return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+ u32 data = enable ? 0xc : 0;
+ int ret;
+
+ /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
+ ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
+ &data);
+
+ return ret < 0 ? ret : 0;
}
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
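pci_write_vpd() refuses accesses beyond the VPD image the core parsed at probe time; EEPROM_STAT_ADDR (0x4000) lies past that image, hence the _any variant, which skips the range check but keeps the bytes-or-errno return convention:

        u32 data = enable ? 0xc : 0;
        ssize_t n;

        n = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR,
                              sizeof(data), &data);
        return n < 0 ? n : 0;           /* map byte count to success */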
@@ -709,24 +645,22 @@ static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, addr, ret;
struct t3_vpd vpd;
+ u8 base_val = 0;
+ int addr, ret;
/*
* Card information is normally at VPD_BASE but some early cards had
* it at 0.
*/
- ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
- if (ret)
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
+ if (ret < 0)
return ret;
- addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
- for (i = 0; i < sizeof(vpd); i += 4) {
- ret = t3_seeprom_read(adapter, addr + i,
- (__le32 *)((u8 *)&vpd + i));
- if (ret)
- return ret;
- }
+ ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
+ if (ret < 0)
+ return ret;
ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
if (ret)
@@ -3679,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
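The bare 0x82 the old code compared against is the VPD Large Resource "Identifier String" tag, which a well-formed VPD image must start with; the rewrite reads a single byte and tests it against the PCI core's named constant. Sketch of the probe:

        u8 tag = 0;
        ssize_t ret;

        ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &tag);
        if (ret < 0)
                return ret;
        /* PCI_VPD_LRDT_ID_STRING == 0x82 */
        addr = (tag == PCI_VPD_LRDT_ID_STRING) ? VPD_BASE : 0;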
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 163efab27e9b..5060d3998889 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -120,7 +120,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
write_unlock_bh(&ctbl->lock);
dev_err(adap->pdev_dev,
"CLIP FW cmd failed with error %d, "
- "Connections using %pI6c wont be "
+ "Connections using %pI6c won't be "
"offloaded",
ret, ce->addr6.sin6_addr.s6_addr);
return ret;
@@ -133,7 +133,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
} else {
write_unlock_bh(&ctbl->lock);
dev_info(adap->pdev_dev, "CLIP table overflow, "
- "Connections using %pI6c wont be offloaded",
+ "Connections using %pI6c won't be offloaded",
(void *)lip);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 290c1058069a..847c7fc2bbd9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -29,7 +29,7 @@ struct clip_tbl {
atomic_t nfree;
struct list_head ce_free_head;
void *cl_list;
- struct list_head hash_list[];
+ struct list_head hash_list[] __counted_by(clipt_size);
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
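The reworked unwinding gives each mutex its own error label, so a failing QDESC_GET_* releases exactly the lock held at that point; the uld label lands after the normal return to keep the success path linear. The shape, schematically (condition names hypothetical):

        mutex_lock(&uld_mutex);
        if (uld_err)
                goto out_unlock_uld;            /* holds uld_mutex only */
        mutex_unlock(&uld_mutex);

        mutex_lock(&padap->tc_mqprio->mqprio_mutex);
        if (mqprio_err)
                goto out_unlock_mqprio;         /* holds mqprio_mutex only */
out_unlock_mqprio:
        mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
        return rc;

out_unlock_uld:
        mutex_unlock(&uld_mutex);
        goto out;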
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5657ac8cfca0..f20f4bc58492 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -674,7 +674,7 @@ struct port_info {
struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
bool rxtstamp; /* Enable TS */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
@@ -1079,8 +1079,6 @@ struct mbox_list {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal {
struct thermal_zone_device *tzdev;
- int trip_temp;
- int trip_type;
};
#endif
@@ -1213,9 +1211,6 @@ struct adapter {
struct timer_list flower_stats_timer;
struct work_struct flower_stats_work;
- /* Ethtool Dump */
- struct ethtool_dump eth_dump;
-
/* HMA */
struct hma_data hma;
@@ -1235,6 +1230,10 @@ struct adapter {
/* Ethtool n-tuple */
struct cxgb4_ethtool_filter *ethtool_filters;
+
+ /* Ethtool Dump */
+ /* Must be last - ends in a flex-array member. */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1317,7 +1316,7 @@ struct ch_sched_flowc {
* (value, mask) tuples. The associated ingress packet field matches the
* tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
* rule can be constructed by specifying a tuple of (0, 0).) A filter rule
- * matches an ingress packet when all of the individual individual field
+ * matches an ingress packet when all of the individual field
* matching rules are true.
*
* Partial field masks are always valid, however, while it may be easy to
@@ -1610,7 +1609,6 @@ void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
void t4_free_sge_resources(struct adapter *adap);
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
int cxgb4_selftest_lb_pkt(struct net_device *netdev);
@@ -1960,11 +1958,6 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr);
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable);
-
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
@@ -2084,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr);
+ u8 start, unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
@@ -2148,28 +2141,6 @@ int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
unsigned int naddr, const u8 **addr, bool sleep_ok);
int cxgb4_init_mps_ref_entries(struct adapter *adap);
void cxgb4_free_mps_ref_entries(struct adapter *adap);
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok);
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok);
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok);
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
int *tcam_idx, const u8 *addr,
bool persistent, u8 *smt_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 80c6627fe981..c80a93347a8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -122,7 +122,6 @@ void cxgb4_dcb_version_init(struct net_device *);
void cxgb4_dcb_reset(struct net_device *dev);
void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
-void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
static inline __u8 bitswap_1(unsigned char val)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
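bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long)) pattern; the helper takes a bit count directly and makes the intent explicit. Minimal usage:

        unsigned int nbits = 128;
        unsigned long *map;

        map = bitmap_zalloc(nbits, GFP_KERNEL);  /* bits, not longs */
        if (!map)
                return -ENOMEM;
        set_bit(3, map);
        bitmap_free(map);       /* pair with bitmap_zalloc(), not kfree() */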
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 5903bdb78916..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
@@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
int i;
const struct port_info *pi = netdev_priv(dev);
@@ -1546,18 +1550,15 @@ out_free_fw:
return ret;
}
-static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1571,8 +1572,6 @@ static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
if (adapter->ptp_clock)
ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- ts_info->phc_index = -1;
return 0;
}
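Dropping the else branch relies on the ethtool core pre-initializing kernel_ethtool_ts_info.phc_index to -1 ("no PHC") before calling the driver, so only the positive case needs handling; schematically:

        /* core has already set ts_info->phc_index = -1 */
        if (adapter->ptp_clock)
                ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
        return 0;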
@@ -1584,22 +1583,23 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+static int get_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!p)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
while (n--)
- p[n] = pi->rss[n];
+ rxfh->indir[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
- const u8 hfunc)
+static int set_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -1607,16 +1607,17 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!p)
+ if (!rxfh->indir)
return 0;
/* Interface must be brought up at least once */
if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
+ pi->rss[i] = rxfh->indir[i];
return cxgb4_write_rss(pi, pi->rss);
}
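get_rxfh/set_rxfh now exchange a single struct ethtool_rxfh_param instead of separate indir/key/hfunc arguments (the set path also gains an extack). The fields these hunks touch:

        /* rxfh->indir - u32 indirection table, NULL when untouched
         * rxfh->key   - hash key bytes, NULL when untouched
         * rxfh->hfunc - ETH_RSS_HASH_* id; ETH_RSS_HASH_NO_CHANGE
         *               is valid on the set path
         */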
@@ -1729,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
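The ETHTOOL_GRXFH case moves out of .get_rxnfc verbatim into the dedicated hook added above; per the prototype used here, the core calls it directly:

        /* int (*get_rxfh_fields)(struct net_device *dev,
         *                        struct ethtool_rxfh_fields *info);
         * info->flow_type selects the flow; the driver fills
         * info->data with RXH_* bits.
         */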
@@ -1738,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -1989,6 +1994,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
+{
+ /* Read port module EEPROM as long as it is plugged-in and
+ * safe to read.
+ */
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
static int cxgb4_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1997,7 +2011,7 @@ static int cxgb4_get_module_info(struct net_device *dev,
struct adapter *adapter = pi->adapter;
int ret;
- if (!t4_is_inserted_mod_type(pi->mod_type))
+ if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
return -EINVAL;
switch (pi->port_type) {
@@ -2015,12 +2029,15 @@ static int cxgb4_get_module_info(struct net_device *dev,
if (ret)
return ret;
- if (!sff8472_comp || (sff_diag_type & 4)) {
+ if (!sff8472_comp || (sff_diag_type & SFP_DIAG_ADDRMODE)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ if (sff_diag_type & SFP_DIAG_IMPLEMENTED)
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ else
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
}
break;
@@ -2186,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
@@ -2211,7 +2229,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2254,11 +2272,10 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
+ kvfree(eth_filter->port[i].loc_array);
goto free_eth_finfo;
}
}
@@ -2268,7 +2285,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
index 33b2c0c45509..f6f745f5c022 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
@@ -81,8 +81,7 @@ int cxgb_fcoe_enable(struct net_device *netdev)
netdev->features |= NETIF_F_FCOE_CRC;
netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FCOE_MTU;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = true;
netdev_features_change(netdev);
@@ -112,8 +111,7 @@ int cxgb_fcoe_disable(struct net_device *netdev)
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->vlan_features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
+ netdev->fcoe_mtu = false;
netdev_features_change(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 786ceae34488..dd9e68465e69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && fs->mask.ivlan)
- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F |
+ fs->val.ivlan) << tp->vlan_shift;
if (tp->port_shift >= 0 && fs->mask.iport)
ntuple |= (u64)fs->val.iport << tp->port_shift;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dde1cf51d0ab..043733c5c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -51,7 +51,6 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -1176,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
- cxgb4_is_ktls_skb(skb) ||
+ tls_is_skb_tx_device_offloaded(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
@@ -1800,7 +1799,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
struct adapter *adap = container_of(t, struct adapter, tids);
struct sk_buff *skb;
- WARN_ON(tid_out_of_range(&adap->tids, tid));
+ if (tid_out_of_range(&adap->tids, tid)) {
+ dev_err(adap->pdev_dev, "tid %d out of range\n", tid);
+ return;
+ }
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
@@ -2189,18 +2191,6 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order)
-{
- struct adapter *adap = netdev2adap(dev);
-
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
- HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
- HPZ3_V(pgsz_order[3]));
-}
-EXPORT_SYMBOL(cxgb4_iscsi_init);
-
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
@@ -3052,12 +3042,87 @@ static void cxgb_get_stats(struct net_device *dev,
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
+static int cxgb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct port_info *pi = netdev_priv(dev);
+
+ *config = pi->tstamp_config;
+ return 0;
+}
+
+static int cxgb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (is_t4(adapter->params.chip)) {
+ /* For T4 Adapters */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+ }
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pi->rxtstamp = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->tx_type == HWTSTAMP_TX_OFF &&
+ config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
+ pi->ptp_enable = false;
+ }
+
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (cxgb4_ptp_redirect_rx_packet(adapter, pi) >= 0)
+ pi->ptp_enable = true;
+ }
+ pi->tstamp_config = *config;
+ return 0;
+}
+
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
unsigned int mbox;
int ret = 0, prtad, devad;
struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
switch (cmd) {
@@ -3086,81 +3151,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
- case SIOCGHWTSTAMP:
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
- case SIOCSHWTSTAMP:
- if (copy_from_user(&pi->tstamp_config, req->ifr_data,
- sizeof(pi->tstamp_config)))
- return -EFAULT;
-
- if (!is_t4(adapter->params.chip)) {
- switch (pi->tstamp_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L4);
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- cxgb4_ptprx_timestamping(pi, pi->port_id,
- PTP_TS_L2_L4);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
-
- if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
- (pi->tstamp_config.rx_filter ==
- HWTSTAMP_FILTER_NONE)) {
- if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
- pi->ptp_enable = false;
- }
-
- if (pi->tstamp_config.rx_filter !=
- HWTSTAMP_FILTER_NONE) {
- if (cxgb4_ptp_redirect_rx_packet(adapter,
- pi) >= 0)
- pi->ptp_enable = true;
- }
- } else {
- /* For T4 Adapters */
- switch (pi->tstamp_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- pi->rxtstamp = false;
- break;
- case HWTSTAMP_FILTER_ALL:
- pi->rxtstamp = true;
- break;
- default:
- pi->tstamp_config.rx_filter =
- HWTSTAMP_FILTER_NONE;
- return -ERANGE;
- }
- }
- return copy_to_user(req->ifr_data, &pi->tstamp_config,
- sizeof(pi->tstamp_config)) ?
- -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
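The SIOCGHWTSTAMP/SIOCSHWTSTAMP cases deleted here moved into the dedicated hooks added above; the core now performs the user-space copies and hands drivers the kernel representation plus an extack for error reporting:

        /* int (*ndo_hwtstamp_get)(struct net_device *dev,
         *                         struct kernel_hwtstamp_config *cfg);
         * int (*ndo_hwtstamp_set)(struct net_device *dev,
         *                         struct kernel_hwtstamp_config *cfg,
         *                         struct netlink_ext_ack *extack);
         */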
@@ -3181,7 +3171,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
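dev->mtu is read locklessly on hot paths, so the store is annotated to pair with READ_ONCE() at the readers and rule out load/store tearing:

        /* writer, under RTNL */
        WRITE_ONCE(dev->mtu, new_mtu);

        /* lockless reader elsewhere */
        unsigned int mtu = READ_ONCE(dev->mtu);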
@@ -3247,7 +3237,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
dev_info(pi->adapter->pdev_dev,
"Setting MAC %pM on VF %d\n", mac, vf);
- ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
+ ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
if (!ret)
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
return ret;
@@ -3307,7 +3297,7 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
}
if (max_tx_rate == 0) {
- /* unbind VF to to any Traffic Class */
+ /* unbind VF to any Traffic Class */
fw_pfvf =
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
@@ -3495,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 req_rate;
int err = 0;
@@ -3885,6 +3875,8 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_setup_tc = cxgb_setup_tc,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
+ .ndo_hwtstamp_get = cxgb_hwtstamp_get,
+ .ndo_hwtstamp_set = cxgb_hwtstamp_set,
};
#ifdef CONFIG_PCI_IOV
@@ -3903,8 +3895,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -4826,7 +4818,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
goto bye;
}
- /* Get FW from from /lib/firmware/ */
+ /* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
adap->pdev_dev);
if (ret < 0) {
@@ -5047,28 +5039,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
* i.e. starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5405,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5470,7 +5458,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (!adap) {
pci_restore_state(pdev);
- pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5485,7 +5472,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -5854,8 +5840,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5855,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6174,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
@@ -6495,21 +6480,23 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
+ NL_SET_ERR_MSG_MOD(extack, "crypto uld critical resource is under use");
return -EBUSY;
}
ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6517,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6529,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6547,36 +6534,19 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
-}
-
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
- return ret;
}
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
@@ -6595,7 +6565,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
@@ -6608,7 +6577,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int adap_idx = 1;
int s_qpp, qpp, num_seg;
struct port_info *pi;
- bool highdma = false;
enum chip_type chip;
void __iomem *regs;
int func, chip_ver;
@@ -6687,17 +6655,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- highdma = true;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_free_adapter;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_free_adapter;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
adap_idx++;
@@ -6823,7 +6786,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_TC | NETIF_F_NTUPLE;
+ NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
if (chip_ver > CHELSIO_T5) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6841,8 +6804,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
}
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
@@ -7104,7 +7065,6 @@ fw_attach_fail:
out_unmap_bar0:
iounmap(regs);
out_disable_device:
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
@@ -7183,7 +7143,6 @@ static void remove_one(struct pci_dev *pdev)
}
#endif
iounmap(adapter->regs);
- pci_disable_pcie_error_reporting(pdev);
if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
adapter->flags &= ~CXGB4_DEV_ENABLED;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
index a020e8490681..60f4d5b5eb3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -28,28 +28,6 @@ static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
return ret;
}
-static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
-{
- struct mps_entries_ref *mps_entry, *tmp;
- int ret = -EINVAL;
-
- spin_lock(&adap->mps_ref_lock);
- list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
- if (mps_entry->idx == idx) {
- if (!refcount_dec_and_test(&mps_entry->refcnt)) {
- spin_unlock(&adap->mps_ref_lock);
- return -EBUSY;
- }
- list_del(&mps_entry->list);
- kfree(mps_entry);
- ret = 0;
- break;
- }
- }
- spin_unlock(&adap->mps_ref_lock);
- return ret;
-}
-
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
u16 idx, const u8 *mask)
{
@@ -141,82 +119,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
return ret;
}
-int cxgb4_free_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
- unsigned int viid,
- const u8 *addr,
- const u8 *mask,
- unsigned int idx,
- u8 lookup_type,
- u8 port_id,
- bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_raw_mac_filt(adap, viid, addr,
- mask, idx, lookup_type,
- port_id, sleep_ok);
- }
-
- return ret;
-}
-
-int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
- int idx, bool sleep_ok)
-{
- int ret = 0;
-
- if (!cxgb4_mps_ref_dec(adap, idx))
- ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);
-
- return ret;
-}
-
-int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
- const u8 *addr, const u8 *mask,
- unsigned int vni, unsigned int vni_mask,
- u8 dip_hit, u8 lookup_type, bool sleep_ok)
-{
- int ret;
-
- ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
- dip_hit, lookup_type, sleep_ok);
- if (ret < 0)
- return ret;
-
- if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
- ret = -ENOMEM;
- t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
- }
- return ret;
-}
-
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
spin_lock_init(&adap->mps_ref_lock);
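
The cxgb4_mps_ref_dec() helper removed above is a textbook instance of refcounted-entry teardown: drop the count under the list lock and free the entry only when it hits zero. A self-contained sketch of that pattern with generic names (not driver API):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ref_entry {
	struct list_head list;
	refcount_t refcnt;
	u16 idx;
};

/* Returns 0 once the last reference is gone, -EBUSY while holders remain. */
static int ref_entry_put(spinlock_t *lock, struct list_head *head, u16 idx)
{
	struct ref_entry *e, *tmp;
	int ret = -EINVAL;

	spin_lock(lock);
	list_for_each_entry_safe(e, tmp, head, list) {
		if (e->idx != idx)
			continue;
		if (!refcount_dec_and_test(&e->refcnt)) {
			ret = -EBUSY;
			break;
		}
		list_del(&e->list);
		kfree(e);
		ret = 0;
		break;
	}
	spin_unlock(lock);
	return ret;
}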
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 5bf117d2179f..cbd06d9b95d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -194,17 +194,20 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
}
/**
- * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
+ * cxgb4_ptp_adjfine - Adjust frequency of PHC cycle counter
* @ptp: ptp clock structure
- * @ppb: Desired frequency change in parts per billion
+ * @scaled_ppm: Desired frequency in scaled parts per million
*
- * Adjust the frequency of the PHC cycle counter by the indicated ppb from
+ * Adjust the frequency of the PHC cycle counter by the indicated amount from
* the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int cxgb4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct adapter *adapter = (struct adapter *)container_of(ptp,
struct adapter, ptp_clock_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct fw_ptp_cmd c;
int err;
@@ -404,7 +407,7 @@ static const struct ptp_clock_info cxgb4_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = 0,
.pps = 0,
- .adjfreq = cxgb4_ptp_adjfreq,
+ .adjfine = cxgb4_ptp_adjfine,
.adjtime = cxgb4_ptp_adjtime,
.gettime64 = cxgb4_ptp_gettime,
.settime64 = cxgb4_ptp_settime,
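
For the .adjfreq → .adjfine conversion above: scaled_ppm is parts per million carried with a 16-bit binary fraction, so 65536 scaled ppm equals 1 ppm (1000 ppb). The scaled_ppm_to_ppb() helper reduces the ratio 1000/65536 to 125/8192; a standalone illustration of the same arithmetic (plain C, for exposition only):

#include <stdio.h>

/* Same arithmetic as the kernel helper, as I read it:
 * ppb = scaled_ppm * 1000 / 2^16, computed as * 125 >> 13 in 64-bit. */
static long scaled_ppm_to_ppb(long scaled_ppm)
{
	long long ppb = 1 + (long long)scaled_ppm;

	ppb *= 125;
	ppb >>= 13;
	return (long)ppb;
}

int main(void)
{
	printf("%ld\n", scaled_ppm_to_ppb(65536));	/* prints 1000 */
	return 0;
}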
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index dd9be229819a..e2b5554531b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -161,20 +161,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_rule *rule,
+ u16 addr_type,
struct ch_filter_specification *fs)
{
- u16 addr_type = 0;
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- addr_type = match.key->addr_type;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
- } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
- }
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -305,7 +294,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.iport = ~0;
}
-static int cxgb4_validate_flow_match(struct net_device *dev,
+static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack,
struct flow_rule *rule)
{
struct flow_dissector *dissector = rule->match.dissector;
@@ -313,16 +302,17 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
u16 ethtype_key = 0;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
- netdev_warn(dev, "Unsupported key used: 0x%x\n",
- dissector->used_keys);
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -339,13 +329,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
- netdev_err(dev, "IP Key supported only with IPv4/v6");
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP Key supported only with IPv4/v6");
return -EINVAL;
}
flow_rule_match_ip(rule, &match);
if (match.mask->ttl) {
- netdev_warn(dev, "ttl match unsupported for offload");
+ NL_SET_ERR_MSG_MOD(extack,
+ "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
}
@@ -576,7 +568,7 @@ static bool valid_l4_mask(u32 mask)
return hi && lo ? false : true;
}
-static bool valid_pedit_action(struct net_device *dev,
+static bool valid_pedit_action(struct netlink_ext_ack *extack,
const struct flow_action_entry *act,
u8 *natmode_flags)
{
@@ -595,8 +587,7 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_ETH_SMAC_47_16:
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -609,8 +600,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -629,8 +619,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -638,8 +627,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for TCP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -648,8 +637,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -657,8 +645,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for UDP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -667,13 +655,12 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit type");
return false;
}
return true;
@@ -727,8 +714,7 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
* the provided output port is not valid
*/
if (!found) {
- netdev_err(dev, "%s: Out port invalid\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Out port invalid");
return -EINVAL;
}
act_redir = true;
@@ -745,21 +731,21 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
- netdev_err(dev, "%s: Unsupported vlan proto\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan proto");
return -EOPNOTSUPP;
}
break;
default:
- netdev_err(dev, "%s: Unsupported vlan action\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan action");
return -EOPNOTSUPP;
}
act_vlan = true;
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act,
+ bool pedit_valid = valid_pedit_action(extack, act,
&natmode_flags);
if (!pedit_valid)
@@ -771,14 +757,14 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing. cxgb4_set_filter will validate */
break;
default:
- netdev_err(dev, "%s: Unsupported action\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
}
if ((act_pedit || act_vlan) && !act_redir) {
- netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "pedit/vlan rewrite invalid without egress redirect");
return -EINVAL;
}
@@ -858,16 +844,38 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
{
struct adapter *adap = netdev2adap(dev);
struct filter_ctx ctx;
+ u16 addr_type = 0;
u8 inet_family;
int fidx, ret;
if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
return -EOPNOTSUPP;
- if (cxgb4_validate_flow_match(dev, rule))
+ if (cxgb4_validate_flow_match(extack, rule))
return -EOPNOTSUPP;
- cxgb4_process_flow_match(dev, rule, fs);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ fs->val.frag = match.key->flags & FLOW_DIS_IS_FRAGMENT;
+ fs->mask.frag = true;
+ }
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+ cxgb4_process_flow_match(dev, rule, addr_type, fs);
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
@@ -901,8 +909,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "filter creation err %d", ret);
return ret;
}
@@ -1052,7 +1059,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
static void ch_flower_stats_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+ struct adapter *adap = timer_container_of(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
@@ -1135,7 +1142,7 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
return;
if (adap->flower_stats_timer.function)
- del_timer_sync(&adap->flower_stats_timer);
+ timer_shutdown_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
adap->tc_flower_initialized = false;
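
A recurring theme in the tc_flower hunks above: netdev_err()/netdev_warn() become NL_SET_ERR_MSG_MOD()/NL_SET_ERR_MSG_FMT_MOD(), which attach the message to the netlink extended ack so the tc command that submitted the rule prints it, instead of the report landing only in the kernel log. Shape of the pattern, with a hypothetical validation helper:

#include <linux/netlink.h>
#include <linux/types.h>

/* Hypothetical helper: reject unsupported dissector keys with a
 * userspace-visible message. */
static int foo_check_keys(struct netlink_ext_ack *extack, u64 used, u64 supp)
{
	if (used & ~supp) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx",
				       used & ~supp);
		return -EOPNOTSUPP;
	}
	return 0;
}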
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 28fd2de9e4cf..f8dcf0b4abcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -8,6 +8,46 @@
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
+static int cxgb4_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -16,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
- struct sched_class *e;
+ struct ch_sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ ret = cxgb4_policer_validate(actions, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -141,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u32 i;
@@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..a2dcd2e24263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u8 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index be96f1dc0372..d4a862a9fd7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a5d2f84dcdd5..8524246fd67e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -186,7 +186,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
@@ -422,7 +422,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
- * or a a valid linked bucket.
+ * or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index f59dd4b2ae6f..64663112cad8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -242,7 +242,7 @@ struct cxgb4_next_header {
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- struct tc_u32_sel sel;
+ struct tc_u32_sel_hdr sel;
struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
@@ -331,6 +331,6 @@ struct cxgb4_link {
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
- struct cxgb4_link table[]; /* Jump table */
+ struct cxgb4_link table[] __counted_by(size); /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
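
__counted_by(size), attached to the jump table above (and to smtab[], l2tab[] and tab[] further down), tells the compiler and the kernel's runtime bounds checkers that the flexible array's element count lives in the named sibling member; the counter must be assigned before the array is indexed. A hedged sketch of the allocation idiom, reusing struct cxgb4_link from the header above:

#include <linux/overflow.h>
#include <linux/slab.h>

struct jump_tbl {				/* sketch, not driver code */
	unsigned int size;			/* number of entries */
	struct cxgb4_link table[] __counted_by(size);
};

static struct jump_tbl *jump_tbl_alloc(unsigned int n)
{
	struct jump_tbl *t = kvzalloc(struct_size(t, table, n), GFP_KERNEL);

	if (!t)
		return NULL;
	t->size = n;	/* set the counter before touching table[] */
	return t;
}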
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index 9a6d65243334..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -12,7 +12,7 @@
static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
int *temp)
{
- struct adapter *adap = tzdev->devdata;
+ struct adapter *adap = thermal_zone_device_priv(tzdev);
u32 param, val;
int ret;
@@ -29,36 +29,12 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trip_type *type)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *type = adap->ch_thermal.trip_type;
- return 0;
-}
-
-static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
- int trip, int *temp)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *temp = adap->ch_thermal.trip_temp;
- return 0;
-}
-
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
- .get_trip_type = cxgb4_thermal_get_trip_type,
- .get_trip_temp = cxgb4_thermal_get_trip_temp,
};
+static struct thermal_trip trip = { .type = THERMAL_TRIP_CRITICAL };
+
int cxgb4_thermal_init(struct adapter *adap)
{
struct ch_thermal *ch_thermal = &adap->ch_thermal;
@@ -79,15 +55,14 @@ int cxgb4_thermal_init(struct adapter *adap)
if (ret < 0) {
num_trip = 0; /* could not get trip temperature */
} else {
- ch_thermal->trip_temp = val * 1000;
- ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ trip.temperature = val * 1000;
}
snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
- ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
- 0, adap,
- &cxgb4_thermal_ops,
- NULL, 0, 0);
+ ch_thermal->tzdev = thermal_zone_device_register_with_trips(ch_tz_name, &trip, num_trip,
+ adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
if (IS_ERR(ch_thermal->tzdev)) {
ret = PTR_ERR(ch_thermal->tzdev);
dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
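
With the per-trip callbacks deleted, the trip table is handed to the core once at registration time, as the hunk above shows. A condensed sketch mirroring that call shape — foo_* names and the temperature values are placeholders:

#include <linux/err.h>
#include <linux/thermal.h>

static int foo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 40000;		/* placeholder reading, milli-degC */
	return 0;
}

static const struct thermal_zone_device_ops foo_thermal_ops = {
	.get_temp = foo_get_temp,	/* trip callbacks no longer needed */
};

static struct thermal_trip trips[] = {
	{ .type = THERMAL_TRIP_CRITICAL, .temperature = 95000 },
};

static struct thermal_zone_device *foo_register(void *priv)
{
	return thermal_zone_device_register_with_trips("foo_tz", trips,
						       ARRAY_SIZE(trips), priv,
						       &foo_thermal_ops,
						       NULL, 0, 0);
}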
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 17faac715882..5c13bcb4550d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
for (i = 0; i < nq; i++) {
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
- if (txq && txq->q.desc) {
+ if (txq->q.desc) {
tasklet_kill(&txq->qresume_tsk);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 34546f5312ee..d7713038386c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -497,11 +497,6 @@ struct cxgb4_uld_info {
#endif
};
-static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
@@ -513,7 +508,6 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_e2cchan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
@@ -524,8 +518,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
-void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a10a6862a9a4..c02b4e9c06b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -59,7 +59,7 @@ struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[]; /* MUST BE LAST */
+ struct l2t_entry l2tab[] __counted_by(l2t_size); /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -608,25 +608,6 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
return e;
}
-/**
- * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
- * @dev: net_device pointer
- * @vlan: VLAN Id
- * @port: Associated port
- * @dmac: Destination MAC address to add to L2T
- * Returns pointer to the allocated l2t entry
- *
- * Allocates an L2T entry for use by switching rule of a filter
- */
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac)
-{
- struct adapter *adap = netdev2adap(dev);
-
- return t4_l2t_alloc_switching(adap, vlan, port, dmac);
-}
-EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);
-
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
unsigned int l2t_size;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 340fecb28a13..8aad7e9dee6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -115,8 +115,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
unsigned int priority);
u64 cxgb4_select_ntuple(struct net_device *dev,
const struct l2t_entry *l2t);
-struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
- u8 port, u8 *dmac);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index a1b14468d1ff..38a30aeee122 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
e = &s->tab[p->u.params.class];
@@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
+ struct ch_sched_class *e, *end;
void *found = NULL;
/* Look for an entry with matching @val */
@@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p)
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_queue_entry *qe = NULL;
@@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
@@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
unsigned int qid;
int err = 0;
@@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -322,7 +322,7 @@ out_err:
}
static void t4_sched_class_unbind_all(struct port_info *pi,
- struct sched_class *e,
+ struct ch_sched_class *e,
enum sched_bind_type type)
{
if (!e)
@@ -476,12 +476,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
}
/* If @p is NULL, fetch any available unused class */
-static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
- const struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *found = NULL;
- struct sched_class *e, *end;
+ struct ch_sched_class *found = NULL;
+ struct ch_sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
return found;
}
-static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
- struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
{
- struct sched_class *e = NULL;
+ struct ch_sched_class *e = NULL;
u8 class_id;
int err;
@@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
* scheduling class with matching @p is found, then the matching class is
* returned.
*/
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p)
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
{
struct port_info *pi = netdev2pinfo(dev);
u8 class_id;
@@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 speed;
int ret;
@@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
}
}
-static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e)
{
struct port_info *pi = netdev2pinfo(dev);
@@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
s->sched_size = sched_size;
for (i = 0; i < s->sched_size; i++) {
- memset(&s->tab[i], 0, sizeof(struct sched_class));
+ memset(&s->tab[i], 0, sizeof(struct ch_sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].entry_list);
@@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap)
continue;
for (i = 0; i < s->sched_size; i++) {
- struct sched_class *e;
+ struct ch_sched_class *e;
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 5f8b871d79af..4d3b5a757536 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -71,7 +71,7 @@ struct sched_flowc_entry {
struct ch_sched_flowc param;
};
-struct sched_class {
+struct ch_sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[];
+ struct ch_sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
@@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p);
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type);
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p);
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fa5b596ff23a..9fccb8ea9bcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -163,7 +163,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
* for DMA, but this is of course never sent to the hardware and is only used
* to prevent double unmappings. All of the above requires that the Free List
* Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
- * 32-byte or or a power of 2 greater in alignment. Since the SGE's minimal
+ * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
* Free List Buffer alignment is 32 bytes, this works out for us ...
*/
enum {
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
}
/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
- unsigned int chip_ver)
-{
- return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
* cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
@@ -1530,8 +1516,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
- if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ if (tls_is_skb_tx_device_offloaded(skb) &&
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
@@ -1547,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1720,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -1842,8 +1829,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ BUILD_BUG_ON(sizeof(wr->firmware) !=
+ (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci)));
+ fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
@@ -1924,7 +1913,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL
@@ -2280,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2385,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -2682,12 +2671,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2722,7 +2711,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
@@ -4245,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4259,7 +4248,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
if (fl_starving(adap, fl)) {
rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
+ if (napi_schedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
@@ -4280,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
@@ -4465,7 +4454,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
@@ -4886,22 +4875,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
}
}
-/**
- * t4_free_ofld_rxqs - free a block of consecutive Rx queues
- * @adap: the adapter
- * @n: number of queues
- * @q: pointer to first queue
- *
- * Release the resources of a consecutive block of offload Rx queues.
- */
-void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
-{
- for ( ; n; n--, q++)
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
-}
-
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
if (txq->q.desc) {
@@ -5024,9 +4997,9 @@ void t4_sge_stop(struct adapter *adap)
struct sge *s = &adap->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
if (is_offload(adap)) {
struct sge_uld_txq_info *txq_info;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index 541249d78914..109c1dff563a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -66,7 +66,7 @@ struct smt_entry {
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
- struct smt_entry smtab[];
+ struct smt_entry smtab[] __counted_by(smt_size);
};
struct smt_data *t4_init_smt(void);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 9a54302bb046..a77d6ac1ee8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -51,64 +51,6 @@ struct srq_data *t4_init_srq(int srq_size)
return s;
}
-/* cxgb4_get_srq_entry: read the SRQ table entry
- * @dev: Pointer to the net_device
- * @idx: Index to the srq
- * @entryp: pointer to the srq entry
- *
- * Sends CPL_SRQ_TABLE_REQ message for the given index.
- * Contents will be returned in CPL_SRQ_TABLE_RPL message.
- *
- * Returns zero if the read is successful, else a error
- * number will be returned. Caller should not use the srq
- * entry if the return value is non-zero.
- *
- *
- */
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp)
-{
- struct cpl_srq_table_req *req;
- struct adapter *adap;
- struct sk_buff *skb;
- struct srq_data *s;
- int rc = -ENODEV;
-
- adap = netdev2adap(dev);
- s = adap->srq;
-
- if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
- goto out;
-
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_srq_table_req *)
- __skb_put_zero(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
- TID_TID_V(srq_idx) |
- TID_QID_V(adap->sge.fw_evtq.abs_id)));
- req->idx = srq_idx;
-
- mutex_lock(&s->lock);
-
- s->entryp = entryp;
- t4_mgmt_tx(adap, skb);
-
- rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
- if (rc)
- rc = 0;
- else /* !rc means we timed out */
- rc = -ETIMEDOUT;
-
- WARN_ON_ONCE(entryp->idx != srq_idx);
- mutex_unlock(&s->lock);
-out:
- return rc;
-}
-EXPORT_SYMBOL(cxgb4_get_srq_entry);
-
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.h b/drivers/net/ethernet/chelsio/cxgb4/srq.h
index ec85cf93865a..d9f04bd5ffa3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.h
@@ -58,8 +58,6 @@ struct srq_data {
};
struct srq_data *t4_init_srq(int srq_size);
-int cxgb4_get_srq_entry(struct net_device *dev,
- int srq_idx, struct srq_entry *entryp);
void do_srq_table_rpl(struct adapter *adap,
const struct cpl_srq_table_rpl *rpl);
#endif /* __CXGB4_SRQ_H */
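
The removed cxgb4_get_srq_entry() above followed the classic fire-and-wait shape: post a request message, then sleep on a completion that the reply handler signals, converting a timeout into -ETIMEDOUT. A generic sketch of that shape:

#include <linux/completion.h>
#include <linux/errno.h>

/* Generic fire-and-wait: the reply path is expected to call complete(). */
static int request_and_wait(struct completion *comp, unsigned long tmo_jiffies)
{
	unsigned long rc;

	reinit_completion(comp);
	/* ...post the request message to the hardware here... */
	rc = wait_for_completion_timeout(comp, tmo_jiffies);
	return rc ? 0 : -ETIMEDOUT;
}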
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e7b4e3ed056c..171750fad44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
- strim((char *)p->na);
+ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+ strim(p->na);
out:
vfree(vpd);
@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
+ if (ret)
+ return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
@@ -9346,7 +9348,7 @@ int t4_init_devlog_params(struct adapter *adap)
return 0;
}
- /* Otherwise, ask the firmware for it's Device Log Parameters.
+ /* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
@@ -10213,11 +10215,12 @@ out:
* t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF
+ * @start: The start port id associated with specified VF
* @naddr: the number of MAC addresses
* @addr: the MAC address(es) to be set to the specified VF
*/
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
- unsigned int naddr, u8 *addr)
+ u8 start, unsigned int naddr, u8 *addr)
{
struct fw_acl_mac_cmd cmd;
@@ -10232,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
cmd.nmac = naddr;
- switch (adapter->pf) {
+ switch (start) {
case 3:
memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
break;
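
The min_t(int, ...) → min_t(unsigned int, ...) switch in the VPD hunk above is not cosmetic: the field lengths are unsigned, and forcing them through int lets a corrupt length go negative and win the comparison, yielding an enormous size_t when fed to memcpy(). A tiny plain-C illustration:

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int len = 0x80000010u;			/* corrupt length */

	printf("%d\n", min_t(int, len, 16));		/* large negative */
	printf("%u\n", min_t(unsigned int, len, 16));	/* 16, as intended */
	return 0;
}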
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 002fc62ea726..63bc956d2037 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -293,6 +293,8 @@ enum {
#define I2C_PAGE_SIZE 0x100
#define SFP_DIAG_TYPE_ADDR 0x5c
#define SFP_DIAG_TYPE_LEN 0x1
+#define SFP_DIAG_ADDRMODE BIT(2)
+#define SFP_DIAG_IMPLEMENTED BIT(6)
#define SFF_8472_COMP_ADDR 0x5e
#define SFF_8472_COMP_LEN 0x1
#define SFF_REV_ADDR 0x1
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0a326c054707..2419459a0b85 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be32 r3[2];
- u8 ethmacdst[6];
- u8 ethmacsrc[6];
- __be16 ethtype;
- __be16 vlantci;
+ struct_group(firmware,
+ u8 ethmacdst[ETH_ALEN];
+ u8 ethmacsrc[ETH_ALEN];
+ __be16 ethtype;
+ __be16 vlantci;
+ );
};
#define FW_CMD_MAX_TIMEOUT 10000
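
struct_group(), introduced above for fw_eth_tx_pkt_vm_wr, wraps consecutive members in a mirrored struct/union so the block can be addressed as one object (wr->firmware) — which is why the sizeof() sums and the cross-member memcpy() calls in the sge.c hunks collapse into single bounded operations that FORTIFY_SOURCE can check. Sketch of the idiom:

#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/stddef.h>	/* struct_group() */
#include <linux/types.h>

struct example_wr {			/* sketch, not the real WR layout */
	__be32 op_immdlen;
	struct_group(firmware,
		u8 ethmacdst[ETH_ALEN];
		u8 ethmacsrc[ETH_ALEN];
		__be16 ethtype;
		__be16 vlantci;
	);
};

/* sizeof(((struct example_wr *)0)->firmware) == 6 + 6 + 2 + 2 == 16, and
 * memcpy(&wr->firmware, src, sizeof(wr->firmware)) copies all four members
 * in one bounds-checked operation. */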
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 64479c464b4e..2fbe0f059a0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
@@ -1169,7 +1169,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
@@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
* first Queue Set.
*/
static void cxgb4vf_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev,
* device -- after vetting them of course!
*/
static int cxgb4vf_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2855,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
@@ -2895,7 +2899,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct port_info *pi;
unsigned int pmask;
- int pci_using_dac;
int err, pidx;
/*
@@ -2916,19 +2919,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
/*
- * Set up our DMA mask: try for 64-bit address masking first and
- * fall back to 32-bit if we can't get 64 bits ...
+ * Set up our DMA mask
*/
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err == 0) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err != 0) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto err_release_regions;
- }
- pci_using_dac = 0;
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_release_regions;
}
/*
@@ -3074,9 +3070,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3196,6 +3190,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
if (adapter->registered_device_map == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -EINVAL;
goto err_disable_interrupts;
}
@@ -3263,7 +3258,6 @@ err_free_adapter:
err_release_regions:
pci_release_regions(pdev);
- pci_clear_master(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -3343,7 +3337,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
* Disable the device and release its PCI resources.
*/
pci_disable_device(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
}
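
On the DMA hunks in both PF and VF probe paths: as I understand the rationale, every platform the kernel still supports either satisfies a 64-bit streaming/coherent mask outright or transparently bounce-buffers, so the 32-bit fallback and the pci_using_dac/highdma bookkeeping were dead weight; failure now means no usable DMA at all, and NETIF_F_HIGHDMA can be set unconditionally. The whole probe-time setup collapses to:

err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
	dev_err(&pdev->dev, "no usable DMA configuration\n");
	goto err_release_regions;
}
/* ...later, when features are assembled: */
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;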
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0295b2406646..31fab2415743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
+ const size_t fw_hdr_copy_len = sizeof(wr->firmware);
/*
* The chip minimum packet length is 10 octets but the firmware
@@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/*
@@ -2065,7 +2062,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
*/
static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2097,7 +2094,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
+ if (napi_schedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
@@ -2124,7 +2121,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
*/
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
@@ -2194,7 +2191,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
/**
* t4vf_sge_alloc_rxq - allocate an SGE RX Queue
* @adapter: the adapter
- * @rspq: pointer to to the new rxq's Response Queue to be filled in
+ * @rspq: pointer to the new rxq's Response Queue to be filled in
* @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
* @dev: the network device associated with the new rspq
* @intr_dest: MSI-X vector index (overriden in MSI mode)
@@ -2339,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
@@ -2612,9 +2609,9 @@ void t4vf_sge_stop(struct adapter *adapter)
struct sge *s = &adapter->sge;
if (s->rx_timer.function)
- del_timer_sync(&s->rx_timer);
+ timer_delete_sync(&s->rx_timer);
if (s->tx_timer.function)
- del_timer_sync(&s->tx_timer);
+ timer_delete_sync(&s->tx_timer);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..56fcc531af2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -706,7 +706,7 @@ int t4vf_fl_pkt_align(struct adapter *adapter)
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
* specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
+ * have the Padding Boundary be larger than the Packing Boundary but you
* could set the chip up that way and, in fact, legacy T4 code would
* end doing this because it would initialize the Padding Boundary and
* leave the Packing Boundary initialized to 0 (16 bytes).)
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 585590520076..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -39,7 +39,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
@@ -49,7 +48,6 @@
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -73,20 +71,22 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -226,67 +226,79 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("Only IPv4/6 xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("Only transport and tunnel xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("Only ESP xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
return -EINVAL;
}
if (x->encap) {
- pr_debug("Encapsulated xfrm state not offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
+ return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -295,19 +307,20 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -319,20 +332,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
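
For reference, the pr_debug()-to-extack conversion above changes where rejection reasons land: NL_SET_ERR_MSG_MOD() attaches the string to the netlink extended ACK, so the process issuing "ip xfrm state add ... offload dev ..." sees the reason directly instead of fishing it out of the kernel log. A minimal sketch of the pattern, with a hypothetical foo_xfrm_add_state() callback (the relocated try_module_get() follows the same spirit: take the reference before any allocation so every error path unwinds in one place):

#include <linux/netlink.h>
#include <net/xfrm.h>

static int foo_xfrm_add_state(struct net_device *dev,
			      struct xfrm_state *x,
			      struct netlink_ext_ack *extack)
{
	if (x->id.proto != IPPROTO_ESP) {
		/* Reported back over netlink to the requesting process. */
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
		return -EINVAL;
	}
	return 0;
}
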
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
index 1d110d2edd64..0d42e7d15714 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
@@ -4,7 +4,6 @@
#ifndef __CHCR_IPSEC_H__
#define __CHCR_IPSEC_H__
-#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -10,6 +10,7 @@
#include <net/ipv6.h>
#include <linux/netdevice.h>
#include <crypto/aes.h>
+#include <linux/skbuff_ref.h>
#include "chcr_ktls.h"
static LIST_HEAD(uld_ctx_list);
@@ -361,9 +362,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
- struct chcr_ktls_ofld_ctx_tx *tx_ctx =
- chcr_get_ktls_tx_context(tls_ctx);
- struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_uld_ctx *u_ctx;
@@ -396,7 +395,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
- tx_ctx->chcr_info = NULL;
+ chcr_set_ktls_tx_info(tls_ctx, NULL);
/* release module refcount */
module_put(THIS_MODULE);
}
@@ -417,7 +416,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
@@ -427,8 +425,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
u8 daaddr[16];
int ret = -1;
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-
pi = netdev_priv(netdev);
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
@@ -440,7 +436,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto out;
}
- if (tx_ctx->chcr_info)
+ if (chcr_get_ktls_tx_info(tls_ctx))
goto out;
if (u_ctx && u_ctx->detach)
@@ -483,7 +479,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
@@ -566,7 +562,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto free_tid;
atomic64_inc(&port_stats->ktls_tx_ctx);
- tx_ctx->chcr_info = tx_info;
+ chcr_set_ktls_tx_info(tls_ctx, tx_info);
return 0;
@@ -647,7 +643,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
struct tls_context *tls_ctx;
@@ -686,7 +682,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
/* Adding tid */
tls_ctx = tls_get_ctx(tx_info->sk);
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ tx_ctx = tls_offload_ctx_tx(tls_ctx);
u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (u_ctx) {
ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
@@ -1012,7 +1008,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1644,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1839,9 +1836,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
*/
if (prior_data_len) {
int i = 0;
- u8 *data = NULL;
skb_frag_t *f;
- u8 *vaddr;
int frag_size = 0, frag_delta = 0;
while (remaining > 0) {
@@ -1853,24 +1848,24 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
i++;
}
f = &record->frags[i];
- vaddr = kmap_atomic(skb_frag_page(f));
-
- data = vaddr + skb_frag_off(f) + remaining;
frag_delta = skb_frag_size(f) - remaining;
if (frag_delta >= prior_data_len) {
- memcpy(prior_data, data, prior_data_len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ prior_data_len);
} else {
- memcpy(prior_data, data, frag_delta);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ frag_delta);
+
/* get the next page */
f = &record->frags[i + 1];
- vaddr = kmap_atomic(skb_frag_page(f));
- data = vaddr + skb_frag_off(f);
- memcpy(prior_data + frag_delta,
- data, (prior_data_len - frag_delta));
- kunmap_atomic(vaddr);
+
+ memcpy_from_page(prior_data + frag_delta,
+ skb_frag_page(f),
+ skb_frag_off(f),
+ prior_data_len - frag_delta);
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
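
memcpy_from_page() from <linux/highmem.h> packages the map/copy/unmap dance the old code spelled out with kmap_atomic(), using the newer kmap_local_page() underneath, so a caller can no longer leak a mapping on an early exit. Approximately:

/* What the helper does internally, approximately (see <linux/highmem.h>). */
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}
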
@@ -1907,9 +1902,8 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
@@ -1926,30 +1920,36 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb);
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tx_ctx = tls_offload_ctx_tx(tls_ctx);
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
- tx_info = tx_ctx->chcr_info;
+ tx_info = chcr_get_ktls_tx_info(tls_ctx);
if (unlikely(!tx_info))
goto out;
@@ -1975,19 +1975,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
- spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ spin_lock_irqsave(&tx_ctx->lock, flags);
do {
cxgb4_reclaim_completed_tx(adap, &q->q, true);
/* fetch the tls record */
- record = tls_get_record(&tx_ctx->base, tcp_seq,
+ record = tls_get_record(tx_ctx, tcp_seq,
&tx_info->record_no);
/* By the time packet reached to us, ACK is received, and record
* won't be found in that case, handle it gracefully.
*/
if (unlikely(!record)) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out;
}
@@ -2011,7 +2011,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset !=
record->len);
if (ret) {
- spin_unlock_irqrestore(&tx_ctx->base.lock,
+ spin_unlock_irqrestore(&tx_ctx->lock,
flags);
goto out;
}
@@ -2042,7 +2042,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
goto out;
}
@@ -2078,7 +2078,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* if any failure, come out from the loop. */
if (ret) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2093,7 +2093,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
@@ -2181,17 +2181,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info)
static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
{
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_info *tx_info;
unsigned long index;
xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
- tx_info = tx_ctx->chcr_info;
+ tx_info = __chcr_get_ktls_tx_info(tx_ctx);
clear_conn_resources(tx_info);
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
- tx_ctx->chcr_info = NULL;
+ memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX);
/* release module refcount */
module_put(THIS_MODULE);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 10572dc55365..dbbba92bf540 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -67,8 +67,7 @@ struct chcr_ktls_info {
bool pending_close;
};
-struct chcr_ktls_ofld_ctx_tx {
- struct tls_offload_context_tx base;
+struct chcr_ktls_ctx_tx {
struct chcr_ktls_info *chcr_info;
};
@@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx {
bool detach;
};
-static inline struct chcr_ktls_ofld_ctx_tx *
-chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+static inline struct chcr_ktls_info *
+__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx)
{
- BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct chcr_ktls_ofld_ctx_tx,
- base);
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+ priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state;
+ return priv_ctx->chcr_info;
+}
+
+static inline struct chcr_ktls_info *
+chcr_get_ktls_tx_info(struct tls_context *tls_ctx)
+{
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+ priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ return priv_ctx->chcr_info;
+}
+
+static inline void
+chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info)
+{
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ priv_ctx->chcr_info = chcr_info;
}
static inline int chcr_get_first_rx_qid(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..21e0dfeff158 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -7,7 +7,6 @@
#define __CHTLS_H__
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -22,6 +21,7 @@
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
+#include <net/tls_prot.h>
#include <net/tls_toe.h>
#include "t4fw_api.h"
@@ -567,14 +567,12 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
-int chtls_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
+ size_t len, int flags, int *addr_len);
+void chtls_splice_eof(struct socket *sock);
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
u64 mask, u64 val, u8 cookie,
int through_l2t);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 4af5561cbfc5..ee0154337a9c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -171,7 +171,7 @@ static void chtls_purge_receive_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- skb_dst_set(skb, (void *)NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -194,7 +194,7 @@ static void chtls_purge_recv_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -505,7 +505,7 @@ static void reset_listen_child(struct sock *child)
chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
if (child->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(child);
}
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
chtls_release_resources(child);
chtls_conn_done(child);
} else {
@@ -951,6 +951,7 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
struct tcp_sock *tp;
unsigned int mss;
struct sock *sk;
+ u16 user_mss;
mss = ntohs(req->tcpopt.mss);
sk = csk->sk;
@@ -969,8 +970,9 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
tp->advmss = dst_metric_advmss(dst);
- if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
- tp->advmss = USER_MSS(tp);
+ user_mss = USER_MSS(tp);
+ if (user_mss && tp->advmss > user_mss)
+ tp->advmss = user_mss;
if (tp->advmss > pmtu - iphdrsz)
tp->advmss = pmtu - iphdrsz;
if (mss && tp->advmss > mss)
@@ -1063,14 +1065,13 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1198,12 +1199,12 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct ipv6_pinfo *newnp = inet6_sk(newsk);
struct ipv6_pinfo *np = inet6_sk(lsk);
- inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+ newinet->pinet6 = &newtcp6sk->inet6;
+ newinet->ipv6_fl_list = NULL;
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
- newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newsk->sk_bound_dev_if = treq->ir_iif;
newinet->inet_opt = NULL;
@@ -1236,11 +1237,11 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
@@ -1384,7 +1385,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -1392,7 +1393,7 @@ static void chtls_pass_accept_request(struct sock *sk,
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
- ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
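
The READ_ONCE() wrappers added around the sysctl reads in this file follow the tree-wide rule that sysctls may be rewritten at any moment with no lock held, so each fast-path read becomes one explicit load. A minimal sketch of the idiom, with a hypothetical helper name:

/* Sketch of the annotation idiom: one explicit load per decision. */
static bool ecn_ok_for(const struct sock *sk)
{
	return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) != 0;
}
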
@@ -1467,7 +1468,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = prandom_u32();
+ atomic_set(&inet_sk(sk)->inet_id, get_random_u16());
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
@@ -1735,7 +1736,7 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
}
@@ -1787,7 +1788,7 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
}
@@ -1856,7 +1857,7 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_rx_hdr, sk, skb);
return 0;
@@ -2260,7 +2261,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
- tp->rcv_tstamp = tcp_time_stamp(tp);
+ tp->rcv_tstamp = tcp_jiffies32;
if (tp->snd_una == tp->snd_nxt &&
!csk_flag_nochk(csk, CSK_TX_FAILOVER))
csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index f61ca657601c..29ceff5a5fcb 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -90,12 +90,11 @@ struct deferred_skb_cb {
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
-#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define USER_MSS(tp) (READ_ONCE((tp)->rx_opt.user_mss))
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
@@ -171,14 +170,14 @@ static inline void chtls_set_req_addr(struct request_sock *oreq,
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index 1e67140b0f80..fab6df21f01c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -106,15 +106,6 @@ void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
}
-/*
- * Set one of the t_flags bits in the TCB.
- */
-int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
-{
- return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
- (u64)val << bit_pos);
-}
-
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..ee19933e2cca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state)
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt)
{
- struct flowc_packed {
- struct fw_flowc_wr fc;
- struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
- } __packed sflowc;
+ DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX);
int nparams, paramidx, flowclen16, flowclen;
- struct fw_flowc_wr *flowc;
struct chtls_sock *csk;
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
tp = tcp_sk(sk);
- memset(&sflowc, 0, sizeof(sflowc));
- flowc = &sflowc.fc;
#define FLOWC_PARAM(__m, __v) \
do { \
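
DEFINE_RAW_FLEX() from <linux/overflow.h> replaces the hand-rolled "packed wrapper struct plus memset()" pattern removed above: it reserves zeroed on-stack storage for a structure plus a fixed number of trailing flexible-array elements and yields a correctly typed pointer. A minimal sketch with a hypothetical struct:

#include <linux/overflow.h>

struct demo_wr {
	u32 hdr;
	u32 vals[];			/* flexible array member */
};

static void demo(void)
{
	/* Zeroed stack storage for a demo_wr plus 8 vals[] entries;
	 * "wr" has type struct demo_wr *.
	 */
	DEFINE_RAW_FLEX(struct demo_wr, wr, vals, 8);

	wr->hdr = 1;
	wr->vals[0] = 2;
}
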
@@ -911,7 +905,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = 0;
+ int ret, err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
@@ -919,8 +913,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
if (csk_mem_free(cdev, sk)) {
- current_timeo = (prandom_u32() % (HZ / 5)) + 2;
- vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = get_random_u32_below(HZ / 5) + 2;
+ vm_wait = get_random_u32_below(HZ / 5) + 2;
}
add_wait_queue(sk_sleep(sk), &wait);
@@ -942,10 +936,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, sk->sk_err ||
- (sk->sk_shutdown & SEND_SHUTDOWN) ||
- (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (csk_mem_free(cdev, sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
@@ -1092,7 +1089,16 @@ new_buf:
if (copy > size)
copy = size;
- if (skb_tailroom(skb) > 0) {
+ if (msg->msg_flags & MSG_SPLICE_PAGES) {
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
+ if (err < 0) {
+ if (err == -EMSGSIZE)
+ goto new_buf;
+ goto do_fault;
+ }
+ copy = err;
+ sk_wmem_queued_add(sk, copy);
+ } else if (skb_tailroom(skb) > 0) {
copy = min(copy, skb_tailroom(skb));
if (is_tls_tx(csk))
copy = min_t(int, copy, csk->tlshws.txleft);
@@ -1227,113 +1233,13 @@ out_err:
goto done;
}
-int chtls_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
+void chtls_splice_eof(struct socket *sock)
{
- struct chtls_sock *csk;
- struct chtls_dev *cdev;
- int mss, err, copied;
- struct tcp_sock *tp;
- long timeo;
+ struct sock *sk = sock->sk;
- tp = tcp_sk(sk);
- copied = 0;
- csk = rcu_dereference_sk_user_data(sk);
- cdev = csk->cdev;
lock_sock(sk);
- timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
- err = sk_stream_wait_connect(sk, &timeo);
- if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
- err != 0)
- goto out_err;
-
- mss = csk->mss;
- csk_set_flag(csk, CSK_TX_MORE_DATA);
-
- while (size > 0) {
- struct sk_buff *skb = skb_peek_tail(&csk->txq);
- int copy, i;
-
- if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
- (copy = mss - skb->len) <= 0) {
-new_buf:
- if (!csk_mem_free(cdev, sk))
- goto wait_for_sndbuf;
-
- if (is_tls_tx(csk)) {
- skb = get_record_skb(sk,
- select_size(sk, size,
- flags,
- TX_TLSHDR_LEN),
- true);
- } else {
- skb = get_tx_skb(sk, 0);
- }
- if (!skb)
- goto wait_for_memory;
- copy = mss;
- }
- if (copy > size)
- copy = size;
-
- i = skb_shinfo(skb)->nr_frags;
- if (skb_can_coalesce(skb, i, page, offset)) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
- } else if (i < MAX_SKB_FRAGS) {
- get_page(page);
- skb_fill_page_desc(skb, i, page, offset, copy);
- } else {
- tx_skb_finalize(skb);
- push_frames_if_head(sk);
- goto new_buf;
- }
-
- skb->len += copy;
- if (skb->len == mss)
- tx_skb_finalize(skb);
- skb->data_len += copy;
- skb->truesize += copy;
- sk->sk_wmem_queued += copy;
- tp->write_seq += copy;
- copied += copy;
- offset += copy;
- size -= copy;
-
- if (corked(tp, flags) &&
- (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
- ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
-
- if (!size)
- break;
-
- if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
- push_frames_if_head(sk);
- continue;
-wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
- err = csk_wait_memory(cdev, sk, &timeo);
- if (err)
- goto do_error;
- }
-out:
- csk_reset_flag(csk, CSK_TX_MORE_DATA);
- if (copied)
- chtls_tcp_push(sk, flags);
-done:
+ chtls_tcp_push(sk, 0);
release_sock(sk);
- return copied;
-
-do_error:
- if (copied)
- goto out;
-
-out_err:
- if (csk_conn_inline(csk))
- csk_reset_flag(csk, CSK_TX_MORE_DATA);
- copied = sk_stream_error(sk, flags, err);
- goto done;
}
static void chtls_select_window(struct sock *sk)
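
chtls_sendpage() disappears here because the ->sendpage protocol hook was retired tree-wide: callers now hand pages to sendmsg() with MSG_SPLICE_PAGES set (absorbed above via skb_splice_from_iter()), and ->splice_eof() only has to flush whatever is still queued. A hedged sketch of the caller-side replacement for a one-page send, with a hypothetical helper:

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/uio.h>

/* Hypothetical helper: splice one page into a socket, sendpage-style. */
static ssize_t demo_send_page(struct socket *sock, struct page *page,
			      int offset, size_t size)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return sock_sendmsg(sock, &msg);
}
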
@@ -1426,7 +1332,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1438,10 +1344,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
int target;
long timeo;
+ int ret;
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1513,11 +1420,15 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
if (!skb->len) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
@@ -1608,6 +1519,8 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
return copied;
}
@@ -1616,7 +1529,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1624,9 +1537,10 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
int copied = 0;
size_t avail; /* amount of available data in current skb */
long timeo;
+ int ret;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1675,7 +1589,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
release_sock(sk);
lock_sock(sk);
} else {
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ /* here 'copied' is 0 due to previous checks */
+ copied = ret;
+ break;
+ }
}
if (unlikely(peek_seq != tp->copied_seq)) {
@@ -1737,7 +1656,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1746,29 +1665,28 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
long timeo;
int target; /* Read at least this many bytes */
+ int ret;
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1839,7 +1757,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
@@ -1908,6 +1830,7 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+unlock:
release_sock(sk);
return copied;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..daa1ebaef511 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
@@ -342,12 +342,13 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
{
struct sk_buff *skb;
- /* Allocate space for cpl_pass_accpet_req which will be synthesized by
- * driver. Once driver synthesizes cpl_pass_accpet_req the skb will go
+ /* Allocate space for cpl_pass_accept_req which will be synthesized by
+ * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req)) -
+ pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
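
size_add() from <linux/overflow.h> saturates to SIZE_MAX instead of wrapping, so an attacker-influenced length can only make the allocation fail, never come back too small. A sketch of the guarantee, with hypothetical names:

#include <linux/overflow.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_alloc(size_t tot_len, size_t hdr_len)
{
	/* If tot_len + hdr_len overflows, size_add() yields SIZE_MAX
	 * and alloc_skb() simply fails instead of undersizing.
	 */
	return alloc_skb(size_add(tot_len, hdr_len), GFP_ATOMIC);
}
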
@@ -606,7 +607,7 @@ static void __init chtls_init_ulp_ops(void)
chtls_cpl_prot.destroy = chtls_destroy_sock;
chtls_cpl_prot.shutdown = chtls_shutdown;
chtls_cpl_prot.sendmsg = chtls_sendmsg;
- chtls_cpl_prot.sendpage = chtls_sendpage;
+ chtls_cpl_prot.splice_eof = chtls_splice_eof;
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index aa79264e72ba..fbedc31674b3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../cxgb4
+ccflags-y := -I $(src)/../cxgb4
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
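
The "& ~INET_ECN_MASK" above enforces a general IPv4 rule: the two low bits of the old TOS byte carry ECN state, not routing policy, and feeding them into a route lookup can select the wrong entry. As a sketch:

#include <net/inet_ecn.h>

/* Hypothetical helper: a TOS value safe to hand to a route lookup. */
static u8 route_tos(u8 tos)
{
	return tos & ~INET_ECN_MASK;	/* keep DSCP, drop ECN bits */
}
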
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 854d87e1125c..2e3973a32d9d 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
EXPORT_SYMBOL(cxgbi_ppm_release);
-static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
- unsigned int *pcpu_ppmax)
+static struct cxgbi_ppm_pool __percpu *
+ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax)
{
- struct cxgbi_ppm_pool *pools;
+ struct cxgbi_ppm_pool __percpu *pools;
unsigned int ppmax = (*total) / num_possible_cpus();
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
unsigned int bmap;
@@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- struct cxgbi_ppm_pool *pool = NULL;
+ struct cxgbi_ppm_pool __percpu *pool = NULL;
unsigned int pool_index_max = 0;
unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
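
The __percpu annotations added above are purely for sparse: a per-CPU pointer is not an ordinary pointer and must be dereferenced through the per-CPU accessors. A minimal sketch with a hypothetical pool type:

#include <linux/percpu.h>

struct demo_pool {
	unsigned int next;
};

static struct demo_pool __percpu *demo_alloc_pools(void)
{
	struct demo_pool __percpu *pools = alloc_percpu(struct demo_pool);
	int cpu;

	if (!pools)
		return NULL;
	/* Dereferencing pools directly would be flagged by sparse;
	 * go through per_cpu_ptr() (or this_cpu_*()) instead.
	 */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(pools, cpu)->next = 0;
	return pools;
}
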
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 4a97aa8e1387..fa5857923db4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -54,7 +54,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -72,6 +71,8 @@
#include <linux/gfp.h>
#include <linux/io.h>
+#include <net/Space.h>
+
#include <asm/irq.h>
#include <linux/atomic.h>
#if ALLOW_DMA
@@ -985,7 +986,7 @@ release_irq:
if (result == DETECTED_NONE) {
pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
- result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
+ result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
}
break;
case A_CNF_MEDIA_10B_2:
@@ -1853,9 +1854,8 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
return -ENOMEM;
dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq <= 0) {
- dev_warn(&dev->dev, "interrupt resource missing\n");
- err = -ENXIO;
+ if (dev->irq < 0) {
+ err = dev->irq;
goto free;
}
@@ -1879,7 +1879,7 @@ free:
return err;
}
-static int cs89x0_platform_remove(struct platform_device *pdev)
+static void cs89x0_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1889,7 +1889,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
*/
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static const struct of_device_id __maybe_unused cs89x0_match[] = {
@@ -1904,7 +1903,7 @@ static struct platform_driver cs89x0_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(cs89x0_match),
},
- .remove = cs89x0_platform_remove,
+ .remove = cs89x0_platform_remove,
};
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..a4972457edd9 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -16,13 +16,12 @@
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/platform_data/eth-ep93xx.h>
-
#define DRV_MODULE_NAME "ep93xx-eth"
#define RX_QUEUE_ENTRIES 64
@@ -689,7 +688,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -738,26 +737,7 @@ static const struct net_device_ops ep93xx_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
-{
- struct net_device *dev;
-
- dev = alloc_etherdev(sizeof(struct ep93xx_priv));
- if (dev == NULL)
- return NULL;
-
- eth_hw_addr_set(dev, data->dev_addr);
-
- dev->ethtool_ops = &ep93xx_ethtool_ops;
- dev->netdev_ops = &ep93xx_netdev_ops;
-
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
- return dev;
-}
-
-
-static int ep93xx_eth_remove(struct platform_device *pdev)
+static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
@@ -765,7 +745,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
dev = platform_get_drvdata(pdev);
if (dev == NULL)
- return 0;
+ return;
ep = netdev_priv(dev);
@@ -782,37 +762,57 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
}
free_netdev(dev);
-
- return 0;
}
static int ep93xx_eth_probe(struct platform_device *pdev)
{
- struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
struct resource *mem;
+ void __iomem *base_addr;
+ struct device_node *np;
+ u8 addr[ETH_ALEN];
+ u32 phy_id;
int irq;
int err;
if (pdev == NULL)
return -ENODEV;
- data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!mem || irq < 0)
return -ENXIO;
- dev = ep93xx_dev_alloc(data);
+ base_addr = ioremap(mem->start, resource_size(mem));
+ if (!base_addr)
+ return dev_err_probe(&pdev->dev, -EIO, "Failed to ioremap ethernet registers\n");
+
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Please provide \"phy-handle\"\n");
+
+ err = of_property_read_u32(np, "reg", &phy_id);
+ of_node_put(np);
+ if (err)
+ return dev_err_probe(&pdev->dev, -ENOENT, "Failed to locate \"phy_id\"\n");
+
+ dev = alloc_etherdev(sizeof(struct ep93xx_priv));
if (dev == NULL) {
err = -ENOMEM;
goto err_out;
}
+
+ memcpy_fromio(addr, base_addr + 0x50, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ dev->ethtool_ops = &ep93xx_ethtool_ops;
+ dev->netdev_ops = &ep93xx_netdev_ops;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
@@ -824,15 +824,10 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- ep->base_addr = ioremap(mem->start, resource_size(mem));
- if (ep->base_addr == NULL) {
- dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
- err = -EIO;
- goto err_out;
- }
+ ep->base_addr = base_addr;
ep->irq = irq;
- ep->mii.phy_id = data->phy_id;
+ ep->mii.phy_id = phy_id;
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
ep->mii.dev = dev;
@@ -859,16 +854,23 @@ err_out:
return err;
}
+static const struct of_device_id ep93xx_eth_of_ids[] = {
+ { .compatible = "cirrus,ep9301-eth" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_eth_of_ids);
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
.remove = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
+ .of_match_table = ep93xx_eth_of_ids,
},
};
module_platform_driver(ep93xx_eth_driver);
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84251b85fc93..6723df9b65d9 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev)
pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < ETH_ALEN; i += 2) {
/* Big-endian (why??!) */
unsigned short s = readreg(dev, PP_IA + i);
- dev->dev_addr[i] = s >> 8;
- dev->dev_addr[i+1] = s & 0xff;
+ addr[i] = s >> 8;
+ addr[i+1] = s & 0xff;
}
+ eth_hw_addr_set(dev, addr);
}
dev->irq = SLOT2IRQ(slot);
@@ -551,16 +554,16 @@ static int set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver");
MODULE_LICENSE("GPL");
-static int mac89x0_device_remove(struct platform_device *pdev)
+static void mac89x0_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
nubus_writew(0, dev->base_addr + ADD_PORT);
free_netdev(dev);
- return 0;
}
static struct platform_driver mac89x0_platform_driver = {
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index d6dd1b4edf6e..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_DESC_H_
@@ -53,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index ac37cacc6136..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_ENET_DESC_H_
@@ -30,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -49,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -101,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index c67a16a48d62..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_H_
@@ -30,21 +17,28 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
-#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_WQ_MAX 256
+#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -90,6 +84,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -141,6 +139,53 @@ struct vxlan_offload {
u8 flags;
};
+struct enic_wq_stats {
+ u64 packets; /* pkts queued for Tx */
+ u64 stopped; /* Tx ring almost full, queue stopped */
+ u64 wake; /* Tx ring no longer full, queue woken up */
+ u64 tso; /* non-encap tso pkt */
+ u64 encap_tso; /* encap tso pkt */
+ u64 encap_csum; /* encap HW csum */
+ u64 csum_partial; /* skb->ip_summed = CHECKSUM_PARTIAL */
+ u64 csum_none; /* HW csum not required */
+ u64 bytes; /* bytes queued for Tx */
+ u64 add_vlan; /* HW adds vlan tag */
+ u64 cq_work; /* Tx completions processed */
+ u64 cq_bytes; /* Tx bytes processed */
+ u64 null_pkt; /* skb length <= 0 */
+ u64 skb_linear_fail; /* linearize failures */
+ u64 desc_full_awake; /* TX ring full while queue awake */
+};
+
+struct enic_rq_stats {
+ u64 packets; /* pkts received */
+ u64 bytes; /* bytes received */
+ u64 l4_rss_hash; /* hashed on l4 */
+ u64 l3_rss_hash; /* hashed on l3 */
+ u64 csum_unnecessary; /* HW verified csum */
+ u64 csum_unnecessary_encap; /* HW verified csum on encap packet */
+ u64 vlan_stripped; /* HW stripped vlan */
+ u64 napi_complete; /* napi complete intr reenabled */
+ u64 napi_repoll; /* napi poll again */
+ u64 bad_fcs; /* bad pkts */
+ u64 pkt_truncated; /* truncated pkts */
+ u64 no_skb; /* out of skbs */
+ u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
+};
+
+struct enic_wq {
+ spinlock_t lock; /* spinlock for wq */
+ struct vnic_wq vwq;
+ struct enic_wq_stats stats;
+} ____cacheline_aligned;
+
+struct enic_rq {
+ struct vnic_rq vrq;
+ struct enic_rq_stats stats;
+ struct page_pool *pool;
+} ____cacheline_aligned;
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -152,8 +197,8 @@ struct enic {
struct work_struct reset;
struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
- struct msix_entry msix_entry[ENIC_INTR_MAX];
- struct enic_msix_entry msix[ENIC_INTR_MAX];
+ struct msix_entry *msix_entry;
+ struct enic_msix_entry *msix;
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
@@ -172,33 +217,30 @@ struct enic {
bool enic_api_busy;
struct enic_port_profile *pp;
- /* work queue cache line section */
- ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
- spinlock_t wq_lock[ENIC_WQ_MAX];
+ struct enic_wq *wq;
+ unsigned int wq_avail;
unsigned int wq_count;
u16 loop_enable;
u16 loop_tag;
- /* receive queue cache line section */
- ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
+ struct enic_rq *rq;
+ unsigned int rq_avail;
unsigned int rq_count;
struct vxlan_offload vxlan;
- u64 rq_truncated_pkts;
- u64 rq_bad_fcs;
- struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
+ struct napi_struct *napi;
- /* interrupt resource cache line section */
- ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
+ struct vnic_intr *intr;
+ unsigned int intr_avail;
unsigned int intr_count;
u32 __iomem *legacy_pba; /* memory-mapped */
- /* completion queue cache line section */
- ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
+ struct vnic_cq *cq;
+ unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -239,21 +281,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
return enic->rq_count + wq;
}
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
unsigned int rq)
{
@@ -266,21 +293,35 @@ static inline unsigned int enic_msix_wq_intr(struct enic *enic,
return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
+/* MSIX interrupts are organized as the error interrupt, then the notify
+ * interrupt, followed by all the I/O interrupts. The error interrupt needs
+ * to fit in 7 bits due to hardware constraints.
+ */
+#define ENIC_MSIX_RESERVED_INTR 2
+#define ENIC_MSIX_ERR_INTR 0
+#define ENIC_MSIX_NOTIFY_INTR 1
+#define ENIC_MSIX_IO_INTR_BASE ENIC_MSIX_RESERVED_INTR
+#define ENIC_MSIX_MIN_INTR (ENIC_MSIX_RESERVED_INTR + 2)
+
+#define ENIC_LEGACY_IO_INTR 0
+#define ENIC_LEGACY_ERR_INTR 1
+#define ENIC_LEGACY_NOTIFY_INTR 2
+
static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
- return enic->rq_count + enic->wq_count;
+ return ENIC_MSIX_ERR_INTR;
}
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
- return enic->rq_count + enic->wq_count + 1;
+ return ENIC_MSIX_NOTIFY_INTR;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_err_intr();
+ return intr == ENIC_LEGACY_ERR_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_err_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
@@ -293,7 +334,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_notify_intr();
+ return intr == ENIC_LEGACY_NOTIFY_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_notify_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
@@ -304,7 +345,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
- if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
net_warn_ratelimited("%s: PCI dma mapping failed!\n",
enic->netdev->name);
enic->gen_stats.dma_map_error++;
@@ -321,5 +362,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
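ENIC_DESC_MAX_SPLITS above is the worst-case number of WQ descriptors a single TSO send can occupy: the 64 KiB TSO limit divided by the per-descriptor length limit, plus one for the remainder. A worked instance, assuming WQ_ENET_LEN_BITS is 14 (16 KiB descriptors; check wq_enet_desc.h for the real value):

/* Assumed: WQ_ENET_LEN_BITS == 14. */
#define DEMO_MAX_TSO		(1u << 16)			/* 65536 */
#define DEMO_MAX_DESC_LEN	(1u << 14)			/* 16384 */
#define DEMO_MAX_SPLITS		(DEMO_MAX_TSO / DEMO_MAX_DESC_LEN + 1)	/* 5 */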
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index 3bdc74fba1e3..e3b700c28bc4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
index 6b9f9255af28..e01790fb0415 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.h
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -1,20 +1,5 @@
-/**
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef __ENIC_API_H__
#define __ENIC_API_H__
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 9900993b6aea..837f954873ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -125,7 +125,7 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
+ struct enic *enic = timer_container_of(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 8c4ce50da6e1..5f5284102fb0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -26,7 +26,7 @@ static inline void enic_rfs_timer_start(struct enic *enic)
static inline void enic_rfs_timer_stop(struct enic *enic)
{
- del_timer_sync(&enic->rfs_h.rfs_may_expire);
+ timer_delete_sync(&enic->rfs_h.rfs_may_expire);
}
#else
static inline void enic_rfs_timer_start(struct enic *enic) {}
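The two timer hunks above are mechanical API migrations: from_timer() was renamed timer_container_of(), and del_timer_sync() became timer_delete_sync(). A minimal sketch of the pattern, using a hypothetical demo_ctx:

#include <linux/timer.h>

struct demo_ctx {
	struct timer_list expire_timer;
};

static void demo_expire(struct timer_list *t)
{
	/* recover the enclosing object from the timer_list pointer */
	struct demo_ctx *ctx = timer_container_of(ctx, t, expire_timer);

	(void)ctx;	/* per-object expiry work goes here */
}

static void demo_stop(struct demo_ctx *ctx)
{
	/* waits for a running callback before the caller frees ctx */
	timer_delete_sync(&ctx->expire_timer);
}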
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index f8d2a6a34282..2cbae7c6cc3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/pci.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index f5bb058b3f96..698d0cb02064 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6ded4d9fa32a..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/ethtool.h>
@@ -47,6 +32,41 @@ struct enic_stat {
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
+#define ENIC_PER_RQ_STAT(stat) { \
+ .name = "rq[%d]_"#stat, \
+ .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_PER_WQ_STAT(stat) { \
+ .name = "wq[%d]_"#stat, \
+ .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_per_rq_stats[] = {
+ ENIC_PER_RQ_STAT(l4_rss_hash),
+ ENIC_PER_RQ_STAT(l3_rss_hash),
+ ENIC_PER_RQ_STAT(csum_unnecessary_encap),
+ ENIC_PER_RQ_STAT(vlan_stripped),
+ ENIC_PER_RQ_STAT(napi_complete),
+ ENIC_PER_RQ_STAT(napi_repoll),
+ ENIC_PER_RQ_STAT(no_skb),
+ ENIC_PER_RQ_STAT(desc_skip),
+};
+
+#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
+
+static const struct enic_stat enic_per_wq_stats[] = {
+ ENIC_PER_WQ_STAT(encap_tso),
+ ENIC_PER_WQ_STAT(encap_csum),
+ ENIC_PER_WQ_STAT(add_vlan),
+ ENIC_PER_WQ_STAT(cq_work),
+ ENIC_PER_WQ_STAT(cq_bytes),
+ ENIC_PER_WQ_STAT(null_pkt),
+ ENIC_PER_WQ_STAT(skb_linear_fail),
+ ENIC_PER_WQ_STAT(desc_full_awake),
+};
+
+#define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -61,6 +81,8 @@ static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_tso),
};
+#define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
+
static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_ok),
ENIC_RX_STAT(rx_frames_total),
@@ -85,13 +107,13 @@ static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_to_max),
};
+#define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
+
static const struct enic_stat enic_gen_stats[] = {
ENIC_GEN_STAT(dma_map_error),
};
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
+#define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
@@ -146,50 +168,70 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct enic *enic = netdev_priv(netdev);
unsigned int i;
+ unsigned int j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_rx_stats; i++) {
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
- for (i = 0; i < enic_n_gen_stats; i++) {
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ for (i = 0; i < enic->rq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_rq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ enic_per_wq_stats[j].name, i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
static void enic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
static int enic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
@@ -210,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -253,9 +295,19 @@ err_out:
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
+ struct enic *enic = netdev_priv(netdev);
+ unsigned int n_per_rq_stats;
+ unsigned int n_per_wq_stats;
+ unsigned int n_stats;
+
switch (sset) {
case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
+ n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
+ n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
+ n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
+ NUM_ENIC_GEN_STATS +
+ n_per_rq_stats + n_per_wq_stats;
+ return n_stats;
default:
return -EOPNOTSUPP;
}
@@ -267,6 +319,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
+ unsigned int j;
int err;
err = enic_dev_stats_dump(enic, &vstats);
@@ -277,12 +330,30 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
if (err == -ENOMEM)
return;
- for (i = 0; i < enic_n_tx_stats; i++)
+ for (i = 0; i < NUM_ENIC_TX_STATS; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
- for (i = 0; i < enic_n_rx_stats; i++)
+ for (i = 0; i < NUM_ENIC_RX_STATS; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
- for (i = 0; i < enic_n_gen_stats; i++)
+ for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqstats = &enic->rq[i].stats;
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
+ index = enic_per_rq_stats[j].index;
+ *(data++) = ((u64 *)rqstats)[index];
+ }
+ }
+ for (i = 0; i < enic->wq_count; i++) {
+ struct enic_wq_stats *wqstats = &enic->wq[i].stats;
+ int index;
+
+ for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
+ index = enic_per_wq_stats[j].index;
+ *(data++) = ((u64 *)wqstats)[index];
+ }
+ }
}
static u32 enic_get_msglevel(struct net_device *netdev)
@@ -457,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -526,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -537,87 +607,71 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
}
-static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int enic_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enic *enic = netdev_priv(netdev);
- if (hkey)
- memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int enic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
- indir)
+ if (rxfh->indir ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- if (hkey)
- memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
return __enic_set_rsskey(enic);
}
static int enic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
+static void enic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
+ channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
+ channels->rx_count = enic->rq_count;
+ channels->tx_count = enic->wq_count;
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_INTX:
+ channels->max_combined = 1;
+ channels->combined_count = 1;
+ break;
+ default:
+ break;
+ }
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
@@ -635,13 +689,13 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
+ .get_channels = enic_get_channels,
};
void enic_set_ethtool_ops(struct net_device *netdev)
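The per-queue stat names above are stored as printf templates ("rq[%d]_"#stat) and expanded once per queue in enic_get_strings(); the total emitted must match enic_get_sset_count() exactly (8 RQ templates x rq_count plus 8 WQ templates x wq_count on top of the fixed stats). A small sketch of the expansion, with demo_* names standing in:

#include <stdio.h>

#define DEMO_GSTRING_LEN 32

static const char *demo_rq_tmpl[] = { "rq[%d]_packets", "rq[%d]_bytes" };

static void demo_fill_strings(char *data, int rq_count)
{
	for (int i = 0; i < rq_count; i++)
		for (int j = 0; j < 2; j++) {
			/* "rq[%d]_packets" + i -> "rq[3]_packets" etc. */
			snprintf(data, DEMO_GSTRING_LEN, demo_rq_tmpl[j], i);
			data += DEMO_GSTRING_LEN;
		}
}

With 2 templates and rq_count = 4 this emits 8 fixed-width entries, matching the count side.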
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index aacf141986d5..6bc8dfdb3d4b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -46,6 +46,7 @@
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/netdev_queues.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -57,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -108,7 +106,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -150,10 +148,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +171,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
@@ -321,48 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq_lock[q_number]);
-
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number]) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
- netif_wake_subqueue(enic->netdev, q_number);
-
- spin_unlock(&enic->wq_lock[q_number]);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -370,7 +326,7 @@ static bool enic_log_q_error(struct enic *enic)
bool err = false;
for (i = 0; i < enic->wq_count; i++) {
- error_status = vnic_wq_error_status(&enic->wq[i]);
+ error_status = vnic_wq_error_status(&enic->wq[i].vwq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
@@ -378,7 +334,7 @@ static bool enic_log_q_error(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
+ error_status = vnic_rq_error_status(&enic->rq[i].vrq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
@@ -421,6 +377,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
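enic_set_rx_coal_setting() moves here so it can be re-run from enic_link_check() on every link-up (next hunk), letting the adaptive-coalescing range follow the negotiated speed instead of being fixed at probe time. A worked view of the speed-to-range mapping, assuming vnic_dev_port_speed() reports Mbps and the ENIC_LINK_SPEED_* thresholds are 4000/10000:

/* Assumed units: Mbps. demo_speed_index(25000) == 2 -> mod_range {3, 6}. */
static int demo_speed_index(unsigned int speed_mbps)
{
	if (speed_mbps > 10000)		/* 10+ Gbps */
		return 2;
	if (speed_mbps > 4000)		/* 4 - 10 Gbps */
		return 1;
	return 0;			/* 0 - 4 Gbps */
}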
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -429,6 +415,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -448,9 +435,9 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
{
struct net_device *netdev = data;
struct enic *enic = netdev_priv(netdev);
- unsigned int io_intr = enic_legacy_io_intr();
- unsigned int err_intr = enic_legacy_err_intr();
- unsigned int notify_intr = enic_legacy_notify_intr();
+ unsigned int io_intr = ENIC_LEGACY_IO_INTR;
+ unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
+ unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
u32 pba;
vnic_intr_mask(&enic->intr[io_intr]);
@@ -590,6 +577,11 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ /* The enic_queue_wq_desc() above does not do HW checksum */
+ enic->wq[wq->index].stats.csum_none++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -622,6 +614,10 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq[wq->index].stats.csum_partial++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -676,16 +672,18 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
unsigned int offset = 0;
unsigned int hdr_len;
dma_addr_t dma_addr;
+ unsigned int pkts;
unsigned int len;
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
+ enic->wq[wq->index].stats.encap_tso++;
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
+ enic->wq[wq->index].stats.tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
@@ -706,7 +704,7 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
if (eop)
- return 0;
+ goto tso_out_stats;
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for additional data fragments
@@ -733,6 +731,15 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
}
}
+tso_out_stats:
+ /* calculate how many packets TSO sent */
+ len = skb->len - hdr_len;
+ pkts = len / mss;
+ if ((len % mss) > 0)
+ pkts++;
+ enic->wq[wq->index].stats.packets += pkts;
+ enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
+
return 0;
}
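The accounting above is a round-up division: segments = ceil(payload / mss), and each segment carries its own copy of the headers. Worked numbers: 4500 payload bytes with MSS 1448 and 66-byte headers gives 4 segments and 4500 + 4 * 66 = 4764 bytes counted.

/* Same math as the tso_out_stats block, factored out for illustration. */
static unsigned int demo_tso_pkts(unsigned int payload, unsigned int mss)
{
	return payload / mss + ((payload % mss) ? 1 : 0);	/* ceil */
}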
@@ -765,6 +772,10 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+ enic->wq[wq->index].stats.encap_csum++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
+
return err;
}
@@ -781,6 +792,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
+ enic->wq[wq->index].stats.add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
@@ -793,7 +805,7 @@ static inline int enic_queue_wq_skb(struct enic *enic,
else if (skb->encapsulation)
err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ else if (skb->ip_summed == CHECKSUM_PARTIAL)
err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
vlan_tag, loopback);
else
@@ -826,13 +838,15 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
unsigned int txq_map;
struct netdev_queue *txq;
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map].vwq;
+
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
+ enic->wq[wq->index].stats.null_pkt++;
return NETDEV_TX_OK;
}
- txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
txq = netdev_get_tx_queue(netdev, txq_map);
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
@@ -844,42 +858,49 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
+ enic->wq[wq->index].stats.skb_linear_fail++;
return NETDEV_TX_OK;
}
- spin_lock(&enic->wq_lock[txq_map]);
+ spin_lock(&enic->wq[txq_map].lock);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
+ enic->wq[wq->index].stats.desc_full_awake++;
return NETDEV_TX_BUSY;
}
if (enic_queue_wq_skb(enic, wq, skb))
goto error;
- if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
+ if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
+ enic->wq[wq->index].stats.stopped++;
+ }
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
return NETDEV_TX_OK;
}
-/* dev_base_lock rwlock held, nominally process context */
+/* rcu_read_lock potentially held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *net_stats)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *stats;
+ u64 pkt_truncated = 0;
+ u64 bad_fcs = 0;
int err;
+ int i;
err = enic_dev_stats_dump(enic, &stats);
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
@@ -898,8 +919,17 @@ static void enic_get_stats(struct net_device *netdev,
net_stats->rx_bytes = stats->rx.rx_bytes_ok;
net_stats->rx_errors = stats->rx.rx_errors;
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
- net_stats->rx_over_errors = enic->rq_truncated_pkts;
- net_stats->rx_crc_errors = enic->rq_bad_fcs;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ struct enic_rq_stats *rqs = &enic->rq[i].stats;
+
+ if (!enic->rq[i].vrq.ctrl)
+ break;
+ pkt_truncated += rqs->pkt_truncated;
+ bad_fcs += rqs->bad_fcs;
+ }
+ net_stats->rx_over_errors = pkt_truncated;
+ net_stats->rx_crc_errors = bad_fcs;
net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}
@@ -1118,18 +1148,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
@@ -1220,230 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb)
- return -ENOMEM;
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- if (skipped)
- return;
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- enic->rq_bad_fcs++;
- else if (bytes_written == 0)
- enic->rq_truncated_pkts++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
-
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- }
-
- if (vlan_stripped)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1508,18 +1326,16 @@ static int enic_poll(struct napi_struct *napi, int budget)
struct enic *enic = netdev_priv(netdev);
unsigned int cq_rq = enic_cq_rq(enic, 0);
unsigned int cq_wq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_legacy_io_intr();
+ unsigned int intr = ENIC_LEGACY_IO_INTR;
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1534,7 +1350,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1546,7 +1362,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[0]);
+ enic_calc_int_moderation(enic, &enic->rq[0].vrq);
if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
@@ -1555,8 +1371,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[0]);
+ enic_set_int_moderation(enic, &enic->rq[0].vrq);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq[0].stats.napi_complete++;
+ } else {
+ enic->rq[0].stats.napi_repoll++;
}
return rq_work_done;
@@ -1605,7 +1424,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
- struct vnic_wq *wq = &enic->wq[wq_index];
+ struct vnic_wq *wq = &enic->wq[wq_index].vwq;
unsigned int cq;
unsigned int intr;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
@@ -1615,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1645,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1659,7 +1477,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1671,7 +1489,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
if ((work_done < budget) && napi_complete_done(napi, work_done)) {
@@ -1680,8 +1498,11 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[rq]);
+ enic_set_int_moderation(enic, &enic->rq[rq].vrq);
vnic_intr_unmask(&enic->intr[intr]);
+ enic->rq[rq].stats.napi_complete++;
+ } else {
+ enic->rq[rq].stats.napi_repoll++;
}
return work_done;
@@ -1689,7 +1510,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, notify_timer);
+ struct enic *enic = timer_container_of(enic, t, notify_timer);
enic_notify_check(enic);
@@ -1711,7 +1532,7 @@ static void enic_free_intr(struct enic *enic)
free_irq(enic->pdev->irq, enic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
if (enic->msix[i].requested)
free_irq(enic->msix_entry[i].vector,
enic->msix[i].devid);
@@ -1778,7 +1599,7 @@ static int enic_request_intr(struct enic *enic)
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
- for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+ for (i = 0; i < enic->intr_count; i++)
enic->msix[i].requested = 0;
for (i = 0; i < enic->intr_count; i++) {
@@ -1820,36 +1641,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -1857,8 +1648,7 @@ static int enic_dev_notify_set(struct enic *enic)
spin_lock_bh(&enic->devcmd_lock);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- err = vnic_dev_notify_set(enic->vdev,
- enic_legacy_notify_intr());
+ err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
break;
case VNIC_DEV_INTR_MODE_MSIX:
err = vnic_dev_notify_set(enic->vdev,
@@ -1891,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1908,11 +1709,21 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
- vnic_rq_enable(&enic->rq[i]);
- vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ vnic_rq_enable(&enic->rq[i].vrq);
+ vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_free_rq;
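Each RQ now owns a page pool created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, so DMA mapping and device sync are handled by the pool rather than per-skb dma_map_single() calls. A minimal sketch of the per-queue alloc/recycle lifecycle using the standard helpers; actual buffer posting goes through enic_rq_alloc_buf(), which presumably lives with the new enic_rq code included earlier in this patch:

#include <net/page_pool/helpers.h>

static int demo_rq_cycle_one(struct page_pool *pool)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return -ENOMEM;	/* the driver counts this as pp_alloc_fail */
	/* page arrives DMA-mapped because of PP_FLAG_DMA_MAP */
	page_pool_put_full_page(pool, page, false);	/* recycle */
	return 0;
}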
@@ -1920,7 +1731,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_enable(&enic->wq[i]);
+ vnic_wq_enable(&enic->wq[i].vwq);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -1947,9 +1758,12 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- ret = vnic_rq_disable(&enic->rq[i]);
- if (!ret)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ ret = vnic_rq_disable(&enic->rq[i].vrq);
+ if (!ret) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -1973,7 +1787,7 @@ static int enic_stop(struct net_device *netdev)
enic_synchronize_irqs(enic);
- del_timer_sync(&enic->notify_timer);
+ timer_delete_sync(&enic->notify_timer);
enic_rfs_flw_tbl_free(enic);
enic_dev_disable(enic);
@@ -1991,12 +1805,12 @@ static int enic_stop(struct net_device *netdev)
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_disable(&enic->wq[i]);
+ err = vnic_wq_disable(&enic->wq[i].vwq);
if (err)
return err;
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
+ err = vnic_rq_disable(&enic->rq[i].vrq);
if (err)
return err;
}
@@ -2006,9 +1820,12 @@ static int enic_stop(struct net_device *netdev)
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
+ for (i = 0; i < enic->rq_count; i++) {
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2029,7 +1846,7 @@ static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
err = enic_open(netdev);
@@ -2047,10 +1864,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (netdev->mtu > enic->port_mtu)
+ if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}
@@ -2324,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2350,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2362,112 +2181,56 @@ static void enic_tx_hang_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
- unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
+ int num_intr;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
* on system capabilities.
*
* Try MSI-X first
- *
- * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
- * (the second to last INTR is used for WQ/RQ errors)
- * (the last INTR is used for notifications)
*/
- BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
- for (i = 0; i < n + m + 2; i++)
- enic->msix_entry[i].entry = i;
-
- /* Use multiple RQs if RSS is enabled
- */
-
- if (ENIC_SETTING(enic, RSS) &&
- enic->config.intr_mode < 1 &&
- enic->rq_count >= n &&
- enic->wq_count >= m &&
- enic->cq_count >= n + m &&
- enic->intr_count >= n + m + 2) {
-
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- n + m + 2, n + m + 2) > 0) {
-
- enic->rq_count = n;
- enic->wq_count = m;
- enic->cq_count = n + m;
- enic->intr_count = n + m + 2;
-
- vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
- return 0;
- }
- }
-
if (enic->config.intr_mode < 1 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= m &&
- enic->cq_count >= 1 + m &&
- enic->intr_count >= 1 + m + 2) {
- if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
- 1 + m + 2, 1 + m + 2) > 0) {
-
- enic->rq_count = 1;
- enic->wq_count = m;
- enic->cq_count = 1 + m;
- enic->intr_count = 1 + m + 2;
-
+ enic->intr_avail >= ENIC_MSIX_MIN_INTR) {
+ for (i = 0; i < enic->intr_avail; i++)
+ enic->msix_entry[i].entry = i;
+
+ num_intr = pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ ENIC_MSIX_MIN_INTR,
+ enic->intr_avail);
+ if (num_intr > 0) {
vnic_dev_set_intr_mode(enic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
-
+ VNIC_DEV_INTR_MODE_MSIX);
+ enic->intr_avail = num_intr;
return 0;
}
}
/* Next try MSI
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
+ * We need 1 INTR
*/
if (enic->config.intr_mode < 2 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 1 &&
+ enic->intr_avail >= 1 &&
!pci_enable_msi(enic->pdev)) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 1;
-
+ enic->intr_avail = 1;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);
-
return 0;
}
/* Next try INTx
*
- * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
+ * We need 3 INTRs
* (the first INTR is used for WQ/RQ)
* (the second INTR is used for WQ/RQ errors)
* (the last INTR is used for notifications)
*/
if (enic->config.intr_mode < 3 &&
- enic->rq_count >= 1 &&
- enic->wq_count >= 1 &&
- enic->cq_count >= 2 &&
- enic->intr_count >= 3) {
-
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->cq_count = 2;
- enic->intr_count = 3;
-
+ enic->intr_avail >= 3) {
+ enic->intr_avail = 3;
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);
-
return 0;
}
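
Taken together, the slimmed-down function is now a plain fallback ladder: queue sizing has moved into enic_adjust_resources() below, and this function only negotiates interrupt vectors. A condensed sketch of the resulting control flow, with the bookkeeping elided (msix_allowed/msi_allowed/intx_allowed and use() are shorthand for the config.intr_mode checks and the vnic_dev_set_intr_mode() calls, not real helpers):

    if (msix_allowed && intr_avail >= ENIC_MSIX_MIN_INTR &&
        pci_enable_msix_range(pdev, entries, ENIC_MSIX_MIN_INTR,
                              intr_avail) > 0)
            return use(VNIC_DEV_INTR_MODE_MSIX); /* intr_avail = vectors granted */
    if (msi_allowed && intr_avail >= 1 && !pci_enable_msi(pdev))
            return use(VNIC_DEV_INTR_MODE_MSI);  /* exactly one vector */
    if (intx_allowed && intr_avail >= 3)
            return use(VNIC_DEV_INTR_MODE_INTX); /* I/O, error, notify */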
@@ -2492,6 +2255,127 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static int enic_adjust_resources(struct enic *enic)
+{
+ unsigned int max_queues;
+ unsigned int rq_default;
+ unsigned int rq_avail;
+ unsigned int wq_avail;
+
+ if (enic->rq_avail < 1 || enic->wq_avail < 1 || enic->cq_avail < 2) {
+ dev_err(enic_get_dev(enic),
+ "Not enough resources available rq: %d wq: %d cq: %d\n",
+ enic->rq_avail, enic->wq_avail,
+ enic->cq_avail);
+ return -ENOSPC;
+ }
+
+ if (is_kdump_kernel()) {
+ dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+ enic->rq_avail = 1;
+ enic->wq_avail = 1;
+ enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+ enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+ enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+ }
+
+ /* if RSS isn't set, then we can only use one RQ */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->rq_avail = 1;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ enic->rq_count = 1;
+ enic->wq_count = 1;
+ enic->cq_count = 2;
+ enic->intr_count = enic->intr_avail;
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ /* Adjust the number of wqs/rqs/cqs/interrupts that will be
+ * used based on which resource is the most constrained
+ */
+ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
+ rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
+ max_queues = min(enic->cq_avail,
+ enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
+ if (wq_avail + rq_avail <= max_queues) {
+ enic->rq_count = rq_avail;
+ enic->wq_count = wq_avail;
+ } else {
+ /* recalculate wq/rq count */
+ if (rq_avail < wq_avail) {
+ enic->rq_count = min(rq_avail, max_queues / 2);
+ enic->wq_count = max_queues - enic->rq_count;
+ } else {
+ enic->wq_count = min(wq_avail, max_queues / 2);
+ enic->rq_count = max_queues - enic->wq_count;
+ }
+ }
+ enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = enic->cq_count + ENIC_MSIX_RESERVED_INTR;
+
+ break;
+ default:
+ dev_err(enic_get_dev(enic), "Unknown interrupt mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
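
A worked example of the MSI-X balancing above, with hypothetical counts: wq_avail = 8, rq_avail = 8, cq_avail = 16, intr_avail = 10 and two reserved vectors give max_queues = min(16, 10 - 2) = 8. Since 8 + 8 > 8, the split branch runs: rq_avail is not smaller than wq_avail, so wq_count = min(8, 8 / 2) = 4 and rq_count = 8 - 4 = 4, yielding cq_count = 8 and intr_count = 10.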
+
+static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rxs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
+
+ rxs->bytes = rqstats->bytes;
+ rxs->packets = rqstats->packets;
+ rxs->hw_drops = rqstats->bad_fcs + rqstats->pkt_truncated;
+ rxs->hw_drop_overruns = rqstats->pkt_truncated;
+ rxs->csum_unnecessary = rqstats->csum_unnecessary +
+ rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
+}
+
+static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *txs)
+{
+ struct enic *enic = netdev_priv(dev);
+ struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
+
+ txs->bytes = wqstats->bytes;
+ txs->packets = wqstats->packets;
+ txs->csum_none = wqstats->csum_none;
+ txs->needs_csum = wqstats->csum_partial + wqstats->encap_csum +
+ wqstats->tso;
+ txs->hw_gso_packets = wqstats->tso;
+ txs->stop = wqstats->stopped;
+ txs->wake = wqstats->wake;
+}
+
+static void enic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rxs,
+ struct netdev_queue_stats_tx *txs)
+{
+ rxs->bytes = 0;
+ rxs->packets = 0;
+ rxs->hw_drops = 0;
+ rxs->hw_drop_overruns = 0;
+ rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
+ txs->bytes = 0;
+ txs->packets = 0;
+ txs->csum_none = 0;
+ txs->needs_csum = 0;
+ txs->hw_gso_packets = 0;
+ txs->stop = 0;
+ txs->wake = 0;
+}
+
static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
@@ -2540,6 +2424,77 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_features_check = enic_features_check,
};
+static const struct netdev_stat_ops enic_netdev_stat_ops = {
+ .get_queue_stats_rx = enic_get_queue_stats_rx,
+ .get_queue_stats_tx = enic_get_queue_stats_tx,
+ .get_base_stats = enic_get_base_stats,
+};
+
+static void enic_free_enic_resources(struct enic *enic)
+{
+ kfree(enic->wq);
+ enic->wq = NULL;
+
+ kfree(enic->rq);
+ enic->rq = NULL;
+
+ kfree(enic->cq);
+ enic->cq = NULL;
+
+ kfree(enic->napi);
+ enic->napi = NULL;
+
+ kfree(enic->msix_entry);
+ enic->msix_entry = NULL;
+
+ kfree(enic->msix);
+ enic->msix = NULL;
+
+ kfree(enic->intr);
+ enic->intr = NULL;
+}
+
+static int enic_alloc_enic_resources(struct enic *enic)
+{
+ enic->wq = kcalloc(enic->wq_avail, sizeof(struct enic_wq), GFP_KERNEL);
+ if (!enic->wq)
+ goto free_queues;
+
+ enic->rq = kcalloc(enic->rq_avail, sizeof(struct enic_rq), GFP_KERNEL);
+ if (!enic->rq)
+ goto free_queues;
+
+ enic->cq = kcalloc(enic->cq_avail, sizeof(struct vnic_cq), GFP_KERNEL);
+ if (!enic->cq)
+ goto free_queues;
+
+ enic->napi = kcalloc(enic->wq_avail + enic->rq_avail,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!enic->napi)
+ goto free_queues;
+
+ enic->msix_entry = kcalloc(enic->intr_avail, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix_entry)
+ goto free_queues;
+
+ enic->msix = kcalloc(enic->intr_avail, sizeof(struct enic_msix_entry),
+ GFP_KERNEL);
+ if (!enic->msix)
+ goto free_queues;
+
+ enic->intr = kcalloc(enic->intr_avail, sizeof(struct vnic_intr),
+ GFP_KERNEL);
+ if (!enic->intr)
+ goto free_queues;
+
+ return 0;
+
+free_queues:
+ enic_free_enic_resources(enic);
+ return -ENOMEM;
+}
+
static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
@@ -2557,18 +2512,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
enic_free_affinity_hint(enic);
-}
-
-static void enic_kdump_kernel_config(struct enic *enic)
-{
- if (is_kdump_kernel()) {
- dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
- enic->rq_count = 1;
- enic->wq_count = 1;
- enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
- enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
- enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
- }
+ enic_free_enic_resources(enic);
}
static int enic_dev_init(struct enic *enic)
@@ -2600,19 +2544,28 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
- /* modify resource count if we are in kdump_kernel
- */
- enic_kdump_kernel_config(enic);
+ enic_ext_cq(enic);
- /* Set interrupt mode based on resource counts and system
- * capabilities
- */
+ err = enic_alloc_enic_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to allocate enic resources\n");
+ return err;
+ }
+
+ /* Set interrupt mode based on system capabilities */
err = enic_set_intr_mode(enic);
if (err) {
dev_err(dev, "Failed to set intr mode based on resource "
"counts and system capabilities, aborting\n");
- return err;
+ goto err_out_free_vnic_resources;
+ }
+
+ /* Adjust resource counts based on most constrained resources */
+ err = enic_adjust_resources(enic);
+ if (err) {
+ dev_err(dev, "Failed to adjust resources\n");
+ goto err_out_free_vnic_resources;
}
/* Allocate and configure vNIC resources
@@ -2634,16 +2587,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
@@ -2653,6 +2607,7 @@ err_out_free_vnic_resources:
enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
+ enic_free_enic_resources(enic);
return err;
}
@@ -2718,26 +2673,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 32);
- goto err_out_release_regions;
- }
} else {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 47);
- goto err_out_release_regions;
- }
using_dac = 1;
}
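
The consolidation above relies on dma_set_mask_and_coherent() being exactly the combination of dma_set_mask() and dma_set_coherent_mask(): one call now sets both the streaming and coherent masks, so the three separate error paths collapse into a single fallback from 47-bit to 32-bit addressing.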
@@ -2864,13 +2807,12 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
- spin_lock_init(&enic->wq_lock[i]);
+ spin_lock_init(&enic->wq[i].lock);
/* Register net device
*/
@@ -2893,6 +2835,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
+ netdev->stat_ops = &enic_netdev_stat_ops;
netdev->watchdog_timeo = 2 * HZ;
enic_set_ethtool_ops(netdev);
@@ -2979,7 +2922,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 80f46dbd5117..4720a952725d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
index a09ff392c1c6..20a2687713ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.h
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_PP_H_
#define _ENIC_PP_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 40b20817ddd5..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -72,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -189,9 +183,9 @@ void enic_free_vnic_resources(struct enic *enic)
unsigned int i;
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_free(&enic->wq[i]);
+ vnic_wq_free(&enic->wq[i].vwq);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_free(&enic->rq[i]);
+ vnic_rq_free(&enic->rq[i].vrq);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -200,16 +194,21 @@ void enic_free_vnic_resources(struct enic *enic)
void enic_get_res_counts(struct enic *enic)
{
- enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
- enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
- enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
- enic->intr_count = vnic_dev_get_res_count(enic->vdev,
- RES_TYPE_INTR_CTRL);
+ enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
+ enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
+ enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
+ enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
+ RES_TYPE_INTR_CTRL);
+
+ enic->wq_count = enic->wq_avail;
+ enic->rq_count = enic->rq_avail;
+ enic->cq_count = enic->cq_avail;
+ enic->intr_count = enic->intr_avail;
dev_info(enic_get_dev(enic),
"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
- enic->wq_count, enic->rq_count,
- enic->cq_count, enic->intr_count);
+ enic->wq_avail, enic->rq_avail,
+ enic->cq_avail, enic->intr_avail);
}
void enic_init_vnic_resources(struct enic *enic)
@@ -234,9 +233,12 @@ void enic_init_vnic_resources(struct enic *enic)
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
+ error_interrupt_enable = 1;
+ error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
+ break;
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
- error_interrupt_offset = enic->intr_count - 2;
+ error_interrupt_offset = enic_msix_err_intr(enic);
break;
default:
error_interrupt_enable = 0;
@@ -246,7 +248,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
cq_index = i;
- vnic_rq_init(&enic->rq[i],
+ vnic_rq_init(&enic->rq[i].vrq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -254,7 +256,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
cq_index = enic->rq_count + i;
- vnic_wq_init(&enic->wq[i],
+ vnic_wq_init(&enic->wq[i].vwq,
cq_index,
error_interrupt_enable,
error_interrupt_offset);
@@ -262,15 +264,15 @@ void enic_init_vnic_resources(struct enic *enic)
/* Init CQ resources
*
- * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
- * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
+ * All CQs point to INTR[0] for INTx, MSI
+ * CQ[i] points to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
*/
for (i = 0; i < enic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
- interrupt_offset = i;
+ interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
break;
default:
interrupt_offset = 0;
@@ -317,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -331,11 +334,29 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
+ err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
enic->config.wq_desc_count,
sizeof(struct wq_enet_desc));
if (err)
@@ -343,7 +364,7 @@ int enic_alloc_vnic_resources(struct enic *enic)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
+ err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
enic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
@@ -353,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -385,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
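
To make the bit arithmetic above concrete, take a hypothetical capability reply of a1 = 0x7: masking with VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT keeps the three supported-size bits, fls(0x7) - 1 = 2 picks the largest supported size (ENIC_RQ_CQ_ENTRY_SIZE_64), and the closing dev_info prints 16 << 2 = 64 bytes. If the capability query fails, or the subsequent set command does, the driver falls back to the 16-byte entries that older hardware has always used.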
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 81f98a8b60e9..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_RES_H_
@@ -25,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
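
The 32- and 64-byte decodes widen the completed index by splicing bits from fetch_index_flags above the classic field. Assuming the usual 12-bit CQ_DESC_COMP_NDX_BITS, a hypothetical descriptor with completed_index_flags & CQ_DESC_COMP_NDX_MASK = 0xabc and a fetch-index fragment of 0x2 decodes to (0x2 << 12) | 0xabc = 0x2abc, enough to index the 16384-entry rings the new maximums allow.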
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* The hardware never reports a full packet checksum, only a
+ * pseudo checksum: it validates the checksum but does not hand
+ * the value to the driver, hence CHECKSUM_UNNECESSARY.
+ *
+ * For encapsulated packets, tcp_udp_csum_ok reflects the inner
+ * checksum; outer_csum_ok is set by hw when the outer UDP
+ * checksum is correct or zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * The cq_enet_rq_desc accessors below touch only the first 15 bytes of the
+ * CQ entry, which are identical across all entry sizes (16, 32 and 64 bytes).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information (16 bits) = user_priority (3) + cfi (1) + vlan (12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
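
Note how the page-pool path above avoids per-packet DMA mapping: page_pool_dev_alloc() returns a page fragment together with its offset and actual truesize, and the DMA address is simply the pool's pre-mapped page address plus that offset. The matching release happens either in enic_free_rq_buf() below or, for pages that made it into an skb, through skb_mark_for_recycle() on the receive path.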
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
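
The loop above is the skip-tolerant consumer: it walks to_clean forward, returning every descriptor to the ring, but only indicates the buffer whose index matches completed_index; anything the hardware jumped over is counted in desc_skip rather than delivered.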
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
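
The 14-bit mask exists because extended WQ rings can now exceed the classic 4096-entry maximum: GENMASK(13, 0) = 0x3fff addresses up to 16384 descriptors, matching the new ENIC_MAX_WQ_DESCS, while non-extended rings keep the narrower CQ_DESC_COMP_NDX_MASK so stray upper bits in legacy descriptors are never misread as index bits.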
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
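
The wake threshold above is sized for the worst case: a queue is only restarted once at least MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors are free, i.e. enough for a maximally fragmented skb plus any splits the driver may add, so a freshly woken queue cannot immediately stop again on its first packet.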
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.desc_count > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
index e6dd30988d6f..0ab5fd6b8d46 100644
--- a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _RQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 519323460f26..27c885e91552 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 4e6aa65857f7..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_CQ_H_
@@ -69,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
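
The two small helpers above replace the old callback-driven walker so that enic_rq_cq_service() and enic_wq_cq_service() can decode their size-specific descriptors inline. The color protocol they implement is worth stating on its own; the following is a userspace-style sketch with hypothetical desc_color() and service() stand-ins, not driver code:

    /* The hw flips the color bit each time it laps the ring, so an
     * entry is new iff its color differs from the color seen when we
     * last wrapped. */
    while (desc_color(ring[to_clean]) != last_color) {
            service(ring[to_clean]);
            if (++to_clean == desc_count) {
                    to_clean = 0;
                    last_color ^= 1; /* expect flipped color next lap */
            }
    }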
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 45015931b335..9f6089e81608 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -159,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
- /* The base address of the desc rings must be 512 byte aligned.
- * Descriptor count is aligned to groups of 32 descriptors. A
- * count of 0 means the maximum 4096 descriptors. Descriptor
- * size is aligned to 16 bytes.
- */
-
- unsigned int count_align = 32;
- unsigned int desc_align = 16;
- ring->base_align = 512;
+ /* Descriptor ring base address alignment in bytes */
+ ring->base_align = VNIC_DESC_BASE_ALIGN;
+ /* A count of 0 means the maximum descriptors */
if (desc_count == 0)
- desc_count = 4096;
+ desc_count = VNIC_DESC_MAX_COUNT;
- ring->desc_count = ALIGN(desc_count, count_align);
+ /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
+ ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
- ring->desc_size = ALIGN(desc_size, desc_align);
+ /* Descriptor size alignment in bytes */
+ ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
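
Worked example of the sizing above, with hypothetical inputs: a request for 1000 descriptors of 17 bytes becomes desc_count = ALIGN(1000, 32) = 1024 and desc_size = ALIGN(17, 16) = 32, so ring->size = 1024 * 32 = 32768 bytes and ring->size_unaligned = 32768 + 512, the slack needed to shift the base up to the next 512-byte boundary.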
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 714fc1ed79e3..7fdd8c661c99 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEV_H_
@@ -44,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define VNIC_DESC_SIZE_ALIGN 16
+#define VNIC_DESC_COUNT_ALIGN 32
+#define VNIC_DESC_BASE_ALIGN 512
+#define VNIC_DESC_MAX_COUNT 4096
+
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fcc4a3ccdd94..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEVCMD_H_
@@ -449,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for a given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 7d6fbb5635a4..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_ENIC_H_
@@ -34,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; /* MAX RQ ring size */
+ u32 max_wq_ring; /* MAX WQ ring size */
+ u32 max_cq_ring; /* MAX CQ ring size */
+ u32 rdma_rsvd_lkey; /* Reserved (privileged) LKey */
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 23604e3d4455..25319f072a04 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
index 2b1636392294..33a72aa10b26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_INTR_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 84ff8ca17fcb..04fee45b5d39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_NIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index 4e45f88ac1d4..b4776e334d63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RESOURCE_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index a3e7b003ada1..5ae80551f17c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -216,4 +203,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0413103ebe94..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RQ_H_
@@ -63,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -74,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
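The ring growth from 4096 to 16384 entries scales the buffer-block bookkeeping through the macros above. A minimal userspace sketch of the arithmetic, assuming the 64-entry default block size that VNIC_RQ_BUF_BLK_ENTRIES() resolves to for rings this large (an assumption — the macro's definition sits outside this hunk):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BLK_ENTRIES		64	/* assumed VNIC_RQ_BUF_BLK_ENTRIES() result */

int main(void)
{
	/* buffer blocks needed before and after the ring-size bump */
	printf("4096 entries  -> %lu blocks\n", DIV_ROUND_UP(4096UL, BLK_ENTRIES));	/* 64 */
	printf("16384 entries -> %lu blocks\n", DIV_ROUND_UP(16384UL, BLK_ENTRIES));	/* 256 */
	return 0;
}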
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index 881fa18542b3..4dcf0e61cb13 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 74c81ed6fdab..2dd04322d760 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_STATS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 24ef8cd40545..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2010 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -64,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
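The unsafe_memcpy() annotation here is about FORTIFY_SOURCE: the destination is a flexible array member that itself lives behind another flexible array, so the compiler cannot derive a destination size and the fortified memcpy() would warn. A minimal sketch of the shape involved (names are illustrative, not the driver's actual layout):

/* outer structure whose trailing storage holds variable-length TLVs */
struct provinfo {
	unsigned int num_tlvs;
	unsigned char tlv_area[];	/* outer flexible array */
};

/* each TLV itself ends in a flexible array */
struct tlv {
	unsigned short type;
	unsigned short length;
	unsigned char value[];		/* inner flexible array: no knowable bound */
};

unsafe_memcpy() performs the copy while documenting, in its final argument, why the bounds check is intentionally bypassed.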
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 057776908828..b51c1c52f8bf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2010 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_VIC_H_
#define _VNIC_VIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index eb75891974df..29c7900349b2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 01209613d57d..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_WQ_H_
@@ -75,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
index c7021e3a631f..425e46a804ee 100644
--- a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _WQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 941f175fb911..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -68,7 +69,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define DEFAULT_GMAC_RXQ_ORDER 9
#define DEFAULT_GMAC_TXQ_ORDER 8
#define DEFAULT_RX_BUF_ORDER 11
-#define DEFAULT_NAPI_WEIGHT 64
#define TX_MAX_FRAGS 16
#define TX_QUEUE_NUM 1 /* max: 6 */
#define RX_MAX_ALLOC_ORDER 2
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -289,13 +289,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
spin_unlock_irqrestore(&port->config_lock, flags);
}
-static void gmac_speed_set(struct net_device *netdev)
+static void gmac_adjust_link(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
union gmac_status status, old_status;
- int pause_tx = 0;
- int pause_rx = 0;
+ bool pause_tx = false;
+ bool pause_rx = false;
status.bits32 = readl(port->gmac_base + GMAC_STATUS);
old_status.bits32 = status.bits32;
@@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev)
switch (phydev->speed) {
case 1000:
status.bits.speed = GMAC_SPEED_1000;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
phydev_name(phydev));
@@ -330,14 +330,9 @@ static void gmac_speed_set(struct net_device *netdev)
}
if (phydev->duplex == DUPLEX_FULL) {
- u16 lcladv = phy_read(phydev, MII_ADVERTISE);
- u16 rmtadv = phy_read(phydev, MII_LPA);
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
-
- if (cap & FLOW_CTRL_RX)
- pause_rx = 1;
- if (cap & FLOW_CTRL_TX)
- pause_tx = 1;
+ phy_get_pause(phydev, &pause_tx, &pause_rx);
+ netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n",
+ pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF");
}
gmac_set_flow_control(netdev, pause_tx, pause_rx);
@@ -368,7 +363,7 @@ static int gmac_setup_phy(struct net_device *netdev)
phy = of_phy_get_and_connect(netdev,
dev->of_node,
- gmac_speed_set);
+ gmac_adjust_link);
if (!phy)
return -ENODEV;
netdev->phydev = phy;
@@ -389,6 +384,9 @@ static int gmac_setup_phy(struct net_device *netdev)
status.bits.mii_rmii = GMAC_PHY_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
netdev_dbg(netdev,
"RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
@@ -430,8 +428,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
.val = CONFIG0_MAXLEN_1536,
},
{
- .max_l3_len = 1542,
- .val = CONFIG0_MAXLEN_1542,
+ .max_l3_len = 1548,
+ .val = CONFIG0_MAXLEN_1548,
},
{
.max_l3_len = 9212,
@@ -1106,10 +1104,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1118,6 +1119,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1141,32 +1144,79 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
- unsigned short mtu;
+ bool tcp = false;
void *buffer;
-
- mtu = ETH_HLEN;
- mtu += netdev->mtu;
- if (skb->protocol == htons(ETH_P_8021Q))
- mtu += VLAN_HLEN;
+ u16 mss;
+ int ret;
word1 = skb->len;
word3 = SOF_BIT;
- if (word1 > mtu) {
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ /* This means we are dealing with TCP and skb->len is the
+ * sum total of all the segments. The TSO will deal with
+ * chopping this up for us.
+ */
+ /* The accelerator needs the full frame size here */
+ mss += skb_tcp_all_headers(skb);
+ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
+ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will
+ * according to the datasheet put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
word1 |= TSS_MTU_ENABLE_BIT;
- word3 |= mtu;
+ word3 |= mss;
+ } else if (skb->len >= ETH_FRAME_LEN) {
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
+ * bigger than 1514 bytes. A hypothesis about this is that the
+ * checksum buffer is only 1518 bytes, so when the frames get
+ * bigger they get truncated, or the last few bytes get
+ * overwritten by the FCS.
+ *
+ * Just use software checksumming and bypass on bigger frames.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = skb_checksum_help(skb);
+ if (ret)
+ return ret;
+ }
+ word1 |= TSS_BYPASS_BIT;
}
- if (skb->ip_summed != CHECKSUM_NONE) {
- int tcp = 0;
-
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* We do not switch off the checksumming on non TCP/UDP
+ * frames: as is shown from tests, the checksumming engine
+ * is smart enough to see that a frame is not actually TCP
+ * or UDP and then just pass it through without any changes
+ * to the frame.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
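Condensed, the rewritten TX mapping now picks one of four modes instead of keying everything off the MTU. A standalone sketch of the decision order (the helper name and parameters are stand-ins, not the driver's own):

enum tx_mode { TX_TSO, TX_TOE, TX_BYPASS, TX_PLAIN };

static enum tx_mode pick_tx_mode(unsigned int gso_size, int is_tcp,
				 unsigned int len, unsigned int max_csum_len)
{
	if (gso_size)			/* TSO: hardware segments the superframe */
		return TX_TSO;
	if (is_tcp)			/* TOE: engine chops oversized TCP frames */
		return TX_TOE;
	if (len >= max_csum_len)	/* csum engine limit: software csum, bypass */
		return TX_BYPASS;
	return TX_PLAIN;		/* small non-TCP frame: hardware csum as before */
}

In the driver, the TSO and TOE cases both set TSS_MTU_ENABLE_BIT with an mss in word3, while the bypass case falls back to skb_checksum_help() and TSS_BYPASS_BIT.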
@@ -1402,15 +1452,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1713,10 +1767,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
@@ -1802,9 +1855,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
@@ -1965,7 +2017,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
@@ -1976,15 +2028,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static netdev_features_t gmac_fix_features(struct net_device *netdev,
- netdev_features_t features)
-{
- if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
- features &= ~GMAC_OFFLOAD_FEATURES;
-
- return features;
-}
-
static int gmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2104,8 +2147,23 @@ static void gmac_get_pauseparam(struct net_device *netdev,
pparam->autoneg = true;
}
+static int gmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!pparam->autoneg)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause);
+
+ return 0;
+}
+
static void gmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -2123,7 +2181,9 @@ static void gmac_get_ringparam(struct net_device *netdev,
}
static int gmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err = 0;
@@ -2206,7 +2266,6 @@ static const struct net_device_ops gmac_351x_ops = {
.ndo_set_mac_address = gmac_set_mac_address,
.ndo_get_stats64 = gmac_get_stats64,
.ndo_change_mtu = gmac_change_mtu,
- .ndo_fix_features = gmac_fix_features,
.ndo_set_features = gmac_set_features,
};
@@ -2221,6 +2280,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = {
.set_link_ksettings = gmac_set_ksettings,
.nway_reset = gmac_nway_reset,
.get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
.get_ringparam = gmac_get_ringparam,
.set_ringparam = gmac_set_ringparam,
.get_coalesce = gmac_get_coalesce,
@@ -2356,11 +2416,13 @@ static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct device_node *np = pdev->dev.of_node;
struct gemini_ethernet_port *port;
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
struct device *parent;
+ u8 mac[ETH_ALEN];
unsigned int id;
int irq;
int ret;
@@ -2407,8 +2469,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -ENODEV;
+ if (irq < 0)
+ return irq;
port->irq = irq;
/* Clock the port */
@@ -2456,15 +2518,21 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
- /* We can handle jumbo frames up to 10236 bytes so, let's accept
- * payloads of 10236 bytes minus VLAN and ethernet header
+ /* We can receive jumbo frames up to 10236 bytes but only
+	 * transmit 2047 bytes, so let's accept payloads of 2047
+	 * bytes minus the VLAN and ethernet headers
*/
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
+ netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll,
- DEFAULT_NAPI_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(port->mac_addr, mac, ETH_ALEN);
+ }
if (is_valid_ether_addr((void *)port->mac_addr)) {
eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
@@ -2505,13 +2573,11 @@ unprepare:
return ret;
}
-static int gemini_ethernet_port_remove(struct platform_device *pdev)
+static void gemini_ethernet_port_remove(struct platform_device *pdev)
{
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
-
- return 0;
}
static const struct of_device_id gemini_ethernet_port_of_match[] = {
@@ -2525,7 +2591,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
static struct platform_driver gemini_ethernet_port_driver = {
.driver = {
.name = "gemini-ethernet-port",
- .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ .of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
.remove = gemini_ethernet_port_remove,
@@ -2570,14 +2636,12 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
-static int gemini_ethernet_remove(struct platform_device *pdev)
+static void gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
geth_cleanup_freeq(geth);
geth->initialized = false;
-
- return 0;
}
static const struct of_device_id gemini_ethernet_of_match[] = {
@@ -2591,7 +2655,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
static struct platform_driver gemini_ethernet_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ .of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
.remove = gemini_ethernet_remove,
diff --git a/drivers/net/ethernet/cortina/gemini.h b/drivers/net/ethernet/cortina/gemini.h
index 9fdf77d5eb37..24bb989981f2 100644
--- a/drivers/net/ethernet/cortina/gemini.h
+++ b/drivers/net/ethernet/cortina/gemini.h
@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
#define SOF_BIT 0x80000000
#define EOF_BIT 0x40000000
#define EOFIE_BIT BIT(29)
-#define MTU_SIZE_BIT_MASK 0x1fff
+#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
/* GMAC Tx Descriptor */
struct gmac_txdesc {
@@ -787,7 +787,7 @@ union gmac_config0 {
#define CONFIG0_MAXLEN_1536 0
#define CONFIG0_MAXLEN_1518 1
#define CONFIG0_MAXLEN_1522 2
-#define CONFIG0_MAXLEN_1542 3
+#define CONFIG0_MAXLEN_1548 3
#define CONFIG0_MAXLEN_9k 4 /* 9212 */
#define CONFIG0_MAXLEN_10k 5 /* 10236 */
#define CONFIG0_MAXLEN_1518__6 6
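With the corrected mask, the ceiling advertised in gemini_ethernet_port_probe() works out as: max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN = 2047 - 18 = 2029 bytes, where VLAN_ETH_HLEN is ETH_HLEN (14) plus VLAN_HLEN (4).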
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7af86b6d4150..02e0caff98e3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -3,6 +3,19 @@
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
@@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+ The SPI mode for the host's SPI master to access DM9051 is mode
+ 0 on the SPI bus.
+
+endif # NET_VENDOR_DAVICOM
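With the vendor guard in place, both drivers now sit behind NET_VENDOR_DAVICOM (default y). An illustrative .config fragment selecting both as modules (a hypothetical selection, shown only to make the dependency explicit):

CONFIG_NET_VENDOR_DAVICOM=y
CONFIG_DM9000=m
CONFIG_DM9051=m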
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
index 173c87d21076..225f85bc1f53 100644
--- a/drivers/net/ethernet/davicom/Makefile
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..b87eaf0c250c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1394,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (of_find_property(np, "davicom,ext-phy", NULL))
+ if (of_property_read_bool(np, "davicom,ext-phy"))
pdata->flags |= DM9000_PLATF_EXT_PHY;
- if (of_find_property(np, "davicom,no-eeprom", NULL))
+ if (of_property_read_bool(np, "davicom,no-eeprom"))
pdata->flags |= DM9000_PLATF_NO_EEPROM;
ret = of_get_mac_address(np, pdata->dev_addr);
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
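Note the polarity handling in the gpiod conversion above: values are now logical, so GPIOD_OUT_HIGH requests the reset line asserted and gpiod_set_value_cansleep(reset_gpio, 0) later deasserts it, with any GPIO_ACTIVE_LOW annotation in the device tree applied by gpiolib rather than by driver-side flag parsing. That is why the old of_get_named_gpio_flags()/gpio_set_value(..., 1) pair can simply go away.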
@@ -1768,20 +1770,19 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = {
.resume = dm9000_drv_resume,
};
-static int
-dm9000_drv_remove(struct platform_device *pdev)
+static void dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct board_info *dm = to_dm9000_board(ndev);
unregister_netdev(ndev);
dm9000_release_board(pdev, dm);
- free_netdev(ndev); /* free device structure */
if (dm->power_supply)
regulator_disable(dm->power_supply);
+ free_netdev(ndev); /* free device structure */
+
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
#ifdef CONFIG_OF
@@ -1799,7 +1800,7 @@ static struct platform_driver dm9000_driver = {
.of_match_table = of_match_ptr(dm9000_of_matches),
},
.probe = dm9000_probe,
- .remove = dm9000_drv_remove,
+ .remove = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
new file mode 100644
index 000000000000..59ea48d4c9de
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor,Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * To keep track of the driver operation statistics
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control the receive filtering,
+ * such as the multicast hash-filter and the receive register settings
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte equal to 0x01 indicates a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * Each Rx packet entered into the FIFO memory starts with these
+ * four bytes, which form the Rx header, followed by the ethernet
+ * packet data and an appended 4-byte CRC.
+ * Both the Rx header and the CRC data are used for checking only
+ * and are finally dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: lock structure shared between threads
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store the operating imr value for register DM9051_IMR
+ * @lcr_all: to store the operating lmcr value for register DM9051_LMCR
+ *
+ * The saved data variables, kept up to date for later retrieval and use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+	/* no skb buffer:
+	 * both reg and &rb must be non-incrementing,
+	 * so read one byte at a time via regmap_read
+	 */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* waiting for tx-end rather than tx-req
+ * turned out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "eeprom/phy in processing get timeout\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
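+/* Both EEPROM accessors above share one handshake built from DM9051_EPAR
+ * (word address), the 2-byte EPDRL data window, and a DM9051_EPCR kick
+ * polled to completion via dm9051_epcr_poll(). The phy accessors below
+ * reuse the same sequence with EPCR_EPOS set to steer the cycle at the
+ * phy instead of the EEPROM.
+ */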
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+	/* val is 4 bytes wide; clear it to zero since the following
+	 * regmap_bulk_read only fills the lower 2 bytes
+	 */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+	/* create two regmap instances,
+	 * splitting read/write and bulk_read/bulk_write into separate
+	 * regmaps to avoid regmap execution conflicts
+	 */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, &regconfigdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
+ return PTR_ERR_OR_ZERO(db->regmap_dmbulk);
+}
+
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
+/* Read the DM9051_PAR registers, which hold the mac address loaded
+ * from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+	/* dm9051 chip registers cannot be accessed within 1 ms
+	 * after GPR power on, so a 1 ms delay is essential
+	 */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+	/* GPR power off of the internal phy;
+	 * the internal phy can still be accessed after this power off control
+	 */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value,
+ * > 0 - number of packets read, the caller can repeat the rx operation
+ * 0 - no error, the caller should stop further rx operations
+ * -EBUSY - read data error, the caller should abort the rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
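+		/* rxlen from the rx header counts the appended 4-byte CRC:
+		 * reserve only the frame data; the CRC is read below but
+		 * stays outside skb->len
+		 */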
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
+
+/* transmit a packet,
+ * return value,
+ * 0 - success
+ * -ETIMEDOUT - timeout error
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&db->txq);
+ if (skb) {
+ ntx++;
+ ret = dm9051_single_tx(db, skb->data, skb->len);
+ len = skb->len;
+ dev_kfree_skb(skb);
+ if (ret < 0) {
+ db->bc.tx_err_counter++;
+ return 0;
+ }
+ ndev->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ }
+
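+		/* hysteresis against dm9051_start_xmit(): the queue stops
+		 * above DM9051_TX_QUE_HI_WATER and is woken once drained
+		 * below DM9051_TX_QUE_LO_WATER
+		 */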
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+	/* Exit with the mutex unlocked on an rx or tx error
+	 */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+	/* Unlock the mutex and return from this function if a regmap
+	 * access fails
+	 */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shut down the RX and TX processes and then place the chip into a
+ * low power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* ndo_start_xmit handler: queue the packet and kick the TX worker
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
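+	/* dm9051_loop_tx() wakes the queue again once it drains below
+	 * DM9051_TX_QUE_LO_WATER
+	 */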
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* ndo_set_rx_mode handler: compute the new rx filter and schedule the
+ * rxctrl worker if it changed
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+		netdev_dbg(ndev, "set_multicast rcr |= RCR_ALL, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+	/* always accept broadcast: ff:ff:ff:ff:ff:ff hashes to bit 63 (word 3, bit 15) */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+	/* hash each multicast address into the 64-bit table: the low six CRC
+	 * bits select one of 64 bits, stored as four 16-bit words
+	 */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+	/* schedule work to apply the new settings if they changed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
+
+/* ndo_set_mac_address handler: commit the new address and write it into
+ * the chip's MAC address (PAR) registers
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
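+	/* only scan the internal PHY at address 1 (DM9051_PHY_ADDR) */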
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi_get_chipselect(spi, 0));
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+	/* only write pause settings to the mac; since mac and phy are
+	 * integrated, link state, speed and duplex are already in sync
+	 */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect the phy in polling mode (no phy interrupt line is used)
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ return PTR_ERR_OR_ZERO(db->phydev);
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static void dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dm9051_match_table);
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h
new file mode 100644
index 000000000000..fef3120edd7c
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor,Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
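+/* or'ed with a register address to form an SPI write command */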
+#define DM_SPI_WR 0x80
+
+/* dm9051 Ethernet controller register bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0x7E */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0x7F */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Const
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
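+/* TX software queue watermarks, in queued skbs */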
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 369858272650..76767dec216d 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -216,7 +216,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
(csr12 & 2) == 2) ||
(tp->nway && (csr5 & (TPLnkFail)))) {
/* Link blew? Maybe restart NWay. */
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -226,7 +226,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
medianame[dev->if_port],
(csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
t21142_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 79dc336ce709..078a12f07e96 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -104,21 +104,6 @@ config TULIP_DM910X
def_bool y
depends on TULIP && SPARC
-config DE4X5
- tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
- depends on (PCI || EISA)
- depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
- select CRC32
- help
- This is support for the DIGITAL series of PCI/EISA Ethernet cards.
- These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y. More specific
- information is contained in
- <file:Documentation/networking/device_drivers/ethernet/dec/de4x5.rst>.
-
- To compile this driver as a module, choose M here. The module will
- be called de4x5.
-
config WINBOND_840
tristate "Winbond W89c840 Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 8aab37564d5d..d4f1d21d29a0 100644
--- a/drivers/net/ethernet/dec/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DM9102) += dmfe.o
obj-$(CONFIG_WINBOND_840) += winbond-840.o
obj-$(CONFIG_DE2104X) += de2104x.o
obj-$(CONFIG_TULIP) += tulip.o
-obj-$(CONFIG_DE4X5) += de4x5.o
obj-$(CONFIG_ULI526X) += uli526x.o
# Declare multi-part drivers.
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..f9504f340c4a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -49,7 +49,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
@@ -963,7 +963,7 @@ static void de_next_media (struct de_private *de, const u32 *media,
static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1044,7 +1044,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1428,7 +1428,7 @@ static int de_close (struct net_device *dev)
netif_dbg(de, ifdown, dev, "disabling interface\n");
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
spin_lock_irqsave(&de->lock, flags);
de_stop_hw(de);
@@ -1452,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
de->rx_tail, de->tx_head, de->tx_tail);
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
@@ -2126,7 +2126,7 @@ static int __maybe_unused de_suspend(struct device *dev_d)
if (netif_running (dev)) {
const int irq = pdev->irq;
- del_timer_sync(&de->media_timer);
+ timer_delete_sync(&de->media_timer);
disable_irq(irq);
spin_lock_irq(&de->lock);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
deleted file mode 100644
index 13121c4dcfe6..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ /dev/null
@@ -1,5583 +0,0 @@
-/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
- ethernet driver for Linux.
-
- Copyright 1994, 1995 Digital Equipment Corporation.
-
- Testing resources for this driver have been made available
- in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
-
- The author may be reached at davies@maniac.ultranet.com.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2 of the License, or (at your
- option) any later version.
-
- THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Originally, this driver was written for the Digital Equipment
- Corporation series of EtherWORKS ethernet cards:
-
- DE425 TP/COAX EISA
- DE434 TP PCI
- DE435 TP/COAX/AUI PCI
- DE450 TP/COAX/AUI PCI
- DE500 10/100 PCI Fasternet
-
- but it will now attempt to support all cards which conform to the
- Digital Semiconductor SROM Specification. The driver currently
- recognises the following chips:
-
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
-
- So far the driver is known to work with the following cards:
-
- KINGSTON
- Linksys
- ZNYX342
- SMC8432
- SMC9332 (w/new SROM)
- ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
-
- The driver has been tested on a relatively busy network using the DE425,
- DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
- 16M of data to a DECstation 5000/200 as follows:
-
- TCP UDP
- TX RX TX RX
- DE425 1030k 997k 1170k 1128k
- DE434 1063k 995k 1170k 1125k
- DE435 1063k 995k 1170k 1125k
- DE500 1063k 998k 1170k 1125k in 10Mb/s mode
-
- All values are typical (in kBytes/sec) from a sample of 4 for each
- measurement. Their error is +/-20k on a quiet (private) network and also
- depend on what load the CPU has.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'ewrk3.c' and in turn from
- Donald Becker's 'lance.c' should be obvious. With the module autoload of
- every usable DECchip board, I pinched Donald's 'next_module' field to
- link my modules together.
-
- Up to 15 EISA cards can be supported under this driver, limited primarily
- by the available IRQ lines. I have checked different configurations of
- multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
- problem yet (provided you have at least depca.c v0.38) ...
-
- PCI support has been added to allow the driver to work with the DE434,
- DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
- to the differences in the EISA and PCI CSR address offsets from the base
- address.
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). Loadable module support under PCI and EISA has been
- achieved by letting the driver autoprobe as if it were compiled into the
- kernel. Do make sure you're not sharing interrupts with anything that
- cannot accommodate interrupt sharing!
-
- To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) for fixed autoprobes (not recommended), edit the source code near
- line 5594 to reflect the I/O address you're using, or assign these when
- loading by:
-
- insmod de4x5 io=0xghh where g = bus number
- hh = device number
-
- NB: autoprobing for modules is now supported by default. You may just
- use:
-
- insmod de4x5
-
- to load all available boards. For a specific board, still use
- the 'io=?' above.
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the de4x5 configuration turned off and reboot.
- 5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- To unload a module, turn off the associated interface(s)
- 'ifconfig eth?? down' then 'rmmod de4x5'.
-
- Automedia detection is included so that in principal you can disconnect
- from, e.g. TP, reconnect to BNC and things will still work (after a
- pause whilst the driver figures out where its media went). My tests
- using ping showed that it appears to work....
-
- By default, the driver will now autodetect any DECchip based card.
- Should you have a need to restrict the driver to DIGITAL only cards, you
- can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
-
- I've changed the timing routines to use the kernel timer and scheduling
- functions so that the hangs and other assorted problems that occurred
- while autosensing the media should be gone. A bonus for the DC21040
- auto media sense algorithm is that it can now use one that is more in
- line with the rest (the DC21040 chip doesn't have a hardware timer).
- The downside is the 1 'jiffies' (10ms) resolution.
-
- IEEE 802.3u MII interface code has been added in anticipation that some
- products may use it in the future.
-
- The SMC9332 card has a non-compliant SROM which needs fixing - I have
- patched this driver to detect it because the SROM format used complies
- to a previous DEC-STD format.
-
- I have removed the buffer copies needed for receive on Intels. I cannot
- remove them for Alphas since the Tulip hardware only does longword
- aligned DMA transfers and the Alphas get alignment traps with non
- longword aligned data copies (which makes them really slow). No comment.
-
- I have added SROM decoding routines to make this driver work with any
- card that supports the Digital Semiconductor SROM spec. This will help
- all cards running the dc2114x series chips in particular. Cards using
- the dc2104x chips should run correctly with the basic driver. I'm in
- debt to <mjacob@feral.com> for the testing and feedback that helped get
- this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
- (with the latest SROM complying with the SROM spec V3: their first was
- broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
- (quad 21041 MAC) cards also appear to work despite their incorrectly
- wired IRQs.
-
- I have added a temporary fix for interrupt problems when some SCSI cards
- share the same interrupt as the DECchip based cards. The problem occurs
- because the SCSI card wants to grab the interrupt as a fast interrupt
- (runs the service routine with interrupts turned off) vs. this card
- which really needs to run the service routine with interrupts turned on.
- This driver will now add the interrupt service routine as a fast
- interrupt if it is bounced from the slow interrupt. THIS IS NOT A
- RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
- until people sort out their compatibility issues and the kernel
- interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
- INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
- run on the same interrupt. PCMCIA/CardBus is another can of worms...
-
- Finally, I think I have really fixed the module loading problem with
- more than one DECchip based card. As a side effect, I don't mess with
- the device structure any more which means that if more than 1 card in
- 2.0.x is installed (4 in 2.1.x), the user will have to edit
- linux/drivers/net/Space.c to make room for them. Hence, module loading
- is the preferred way to use this driver, since it doesn't have this
- limitation.
-
- Where SROM media detection is used and full duplex is specified in the
- SROM, the feature is ignored unless lp->params.fdx is set at compile
- time OR during a module load (insmod de4x5 args='eth??:fdx' [see
- below]). This is because there is no way to automatically detect full
- duplex links except through autonegotiation. When I include the
- autonegotiation feature in the SROM autoconf code, this detection will
- occur automatically for that case.
-
- Command line arguments are now allowed, similar to passing arguments
- through LILO. This will allow a per adapter board set up of full duplex
- and media. The only lexical constraints are: the board name (dev->name)
- appears in the list before its parameters. The list of parameters ends
- either at the end of the parameter list or with another board name. The
- following parameters are allowed:
-
- fdx for full duplex
- autosense to set the media/speed; with the following
- sub-parameters:
- TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
-
- Case sensitivity is important for the sub-parameters. They *must* be
- upper case. Examples:
-
- insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-
- For a compiled in driver, at or above line 548, place e.g.
- #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-
- Yes, I know full duplex isn't permissible on BNC or AUI; they're just
- examples. By default, full duplex is turned off and AUTO is the default
- autosense setting. In reality, I expect only the full duplex option to
- be used. Note the use of single quotes in the two examples above and the
- lack of commas to separate items. ALSO, you must get the requested media
- correct in relation to what the adapter SROM says it has. There's no way
- to determine this in advance other than by trial and error and common
- sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
-
- Changed the bus probing. EISA used to be done first, followed by PCI.
- Most people probably don't even know what a de425 is today and the EISA
- probe has messed up some SCSI cards in the past, so now PCI is always
- probed first followed by EISA if a) the architecture allows EISA and
- either b) there have been no PCI cards detected or c) an EISA probe is
- forced by the user. To force a probe include "force_eisa" in your
- insmod "args" line; for built-in kernels either change the driver to do
- this automatically or include #define DE4X5_FORCE_EISA on or before
- line 1040 in the driver.
-
- TO DO:
- ------
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 17-Nov-94 Initial writing. ALPHA code release.
- 0.2 13-Jan-95 Added PCI support for DE435's.
- 0.21 19-Jan-95 Added auto media detection.
- 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
- Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- Add request/release_region code.
- Add loadable modules support for PCI.
- Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
- Fix missed frame counter value and initialisation.
- Fixed EISA probe.
- 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
- Change TX_BUFFS_AVAIL macro.
- Change media autodetection to allow manual setting.
- Completed DE500 (DC21140) support.
- 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
- 0.242 10-May-95 Minor changes.
- 0.30 12-Jun-95 Timer fix for DC21140.
- Portability changes.
- Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
- Add DE500 semi automatic autosense.
- Add Link Fail interrupt TP failure detection.
- Add timer based link change detection.
- Plugged a memory leak in de4x5_queue_pkt().
- 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
- 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
- suggestion by <heiko@colossus.escape.de>.
- 0.33 8-Aug-95 Add shared interrupt support (not released yet).
- 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
- Fix de4x5_interrupt().
- Fix dc21140_autoconf() mess.
- No shared interrupt support.
- 0.332 11-Sep-95 Added MII management interface routines.
- 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
- Add kernel timer code (h/w is too flaky).
- Add MII based PHY autosense.
- Add new multicasting code.
- Add new autosense algorithms for media/mode
- selection using kernel scheduling/timing.
- Re-formatted.
- Made changes suggested by <jeff@router.patch.net>:
- Change driver to detect all DECchip based cards
- with DEC_ONLY restriction a special case.
- Changed driver to autoprobe as a module. No irq
- checking is done now - assume BIOS is good!
- Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
- 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
- only <niles@axp745gsfc.nasa.gov>
- Fix for multiple PCI cards reported by <jos@xos.nl>
- Duh, put the IRQF_SHARED flag into request_interrupt().
- Fix SMC ethernet address in enet_det[].
- Print chip name instead of "UNKNOWN" during boot.
- 0.42 26-Apr-96 Fix MII write TA bit error.
- Fix bug in dc21040 and dc21041 autosense code.
- Remove buffer copies on receive for Intels.
- Change sk_buff handling during media disconnects to
- eliminate DUP packets.
- Add dynamic TX thresholding.
- Change all chips to use perfect multicast filtering.
- Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 21-Jun-96 Fix unconnected media TX retry bug.
- Add Accton to the list of broken cards.
- Fix TX under-run bug for non DC21140 chips.
- Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
- <orava@nether.tky.hut.fi>.
- Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
- <baba@beckman.uiuc.edu>.
- Upgraded alloc_device() code.
- 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
- with <csd@microplex.com>
- 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
- Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
- and <michael@compurex.com>.
- 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
- with a loopback packet.
- 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
- by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
- by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
- 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
- suggestion from <mjacob@feral.com>.
- 0.5 30-Jan-97 Added SROM decoding functions.
- Updated debug flags.
- Fix sleep/wakeup calls for PCI cards, bug reported
- by <cross@gweep.lkg.dec.com>.
- Added multi-MAC, one SROM feature from discussion
- with <mjacob@feral.com>.
- Added full module autoprobe capability.
- Added attempt to use an SMC9332 with broken SROM.
- Added fix for ZYNX multi-mac cards that didn't
- get their IRQs wired correctly.
- 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
- <paubert@iram.es>
- Fix init_connection() to remove extra device reset.
- Fix MAC/PHY reset ordering in dc21140m_autoconf().
- Fix initialisation problem with lp->timeout in
- typeX_infoblock() from <paubert@iram.es>.
- Fix MII PHY reset problem from work done by
- <paubert@iram.es>.
- 0.52 26-Apr-97 Some changes may not credit the right people -
- a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
- <g.thomas@opengroup.org>.
- Fix srom_exec() to return for COMPACT and type 1
- infoblocks.
- Added DC21142 and DC21143 functions.
- Added byte counters from <phil@tazenda.demon.co.uk>
- Added IRQF_DISABLED temporary fix from
- <mjacob@feral.com>.
- 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
- module load: bug reported by
- <Piete.Brooks@cl.cam.ac.uk>
- Fix multi-MAC, one SROM, to work with 2114x chips:
- bug reported by <cmetz@inner.net>.
- Make above search independent of BIOS device scan
- direction.
- Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
- <robin@intercore.com
- Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
- <parmee@postecss.ncrfran.france.ncr.com> and
- <jo@ice.dillingen.baynet.de>.
- Added argument list to set up each board from either
- a module's command line or a compiled in #define.
- Added generic MII PHY functionality to deal with
- newer PHY chips.
- Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
- <redhat@cococo.net>.
- Fix bug in pci_probe() for 64 bit systems reported
- by <belliott@accessone.com>.
- 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
- 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
- 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
- 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
- **Incompatible with 2.0.x from here.**
- 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
- from <lma@varesearch.com>
- Add TP, AUI and BNC cases to 21140m_autoconf() for
- case where a 21140 under SROM control uses, e.g. AUI
- from problem report by <delchini@lpnp09.in2p3.fr>
- Add MII parallel detection to 2114x_autoconf() for
- case where no autonegotiation partner exists from
- problem report by <mlapsley@ndirect.co.uk>.
- Add ability to force connection type directly even
- when using SROM control from problem report by
- <earl@exis.net>.
- Updated the PCI interface to conform with the latest
- version. I hope nothing is broken...
- Add TX done interrupt modification from suggestion
- by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
- <Austin.Donnelly@cl.cam.ac.uk>.
- Fix type[13]_infoblock() bug: during MII search, PHY
- lp->rst not run because lp->ibn not initialised -
- from report & fix by <paubert@iram.es>.
- Fix probe bug with EISA & PCI cards present from
- report by <eirik@netcom.com>.
- 0.541 24-Aug-98 Fix compiler problems associated with i386-string
- ops from multiple bug reports and temporary fix
- from <paubert@iram.es>.
- Fix pci_probe() to correctly emulate the old
- pcibios_find_class() function.
- Add an_exception() for old ZYNX346 and fix compile
- warning on PPC & SPARC, from <ecd@skynet.be>.
- Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
- <Zlatko.Calusic@CARNet.hr> et al.
- 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
- when media is unconnected.
- Change dev->interrupt to lp->interrupt to ensure
- alignment for Alpha's and avoid their unaligned
- access traps. This flag is merely for log messages:
- should do something more definitive though...
- 0.543 30-Dec-98 Add SMP spin locking.
- 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
- a 21143 by <mmporter@home.com>.
- Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
- <mporter@eng.mcd.mot.com>
- Remove double checking for DEBUG_RX in de4x5_dbg_rx()
- from report by <geert@linux-m68k.org>
- 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
- was causing a page fault when initializing the
- variable 'pb', on a non de4x5 PCI device, in this
- case a PCI bridge (DEC chip 21152). The value of
- 'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
- 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
- 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
- generic DMA APIs. Fixed DE425 support on Alpha.
- <maz@wild-wind.fr.eu.org>
- =========================================================================
-*/
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/eisa.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/dma-mapping.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif /* CONFIG_PPC_PMAC */
-
-#include "de4x5.h"
-
-static const char version[] =
- KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
-
-#define c_char const char
-
-/*
-** MII Information
-*/
-struct phy_table {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time - 802.3u is confusing here */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
-};
-
-struct mii_phy {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
- int addr; /* MII address for the PHY */
- u_char *gep; /* Start of GEP sequence block in SROM */
- u_char *rst; /* Start of reset sequence in SROM */
- u_int mc; /* Media Capabilities */
- u_int ana; /* NWay Advertisement */
- u_int fdx; /* Full DupleX capabilities for each media */
- u_int ttm; /* Transmit Threshold Mode for each media */
- u_int mci; /* 21142 MII Connector Interrupt info */
-};
-
-#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
-
-struct sia_phy {
- u_char mc; /* Media Code */
- u_char ext; /* csr13-15 valid when set */
- int csr13; /* SIA Connectivity Register */
- int csr14; /* SIA TX/RX Register */
- int csr15; /* SIA General Register */
- int gepc; /* SIA GEP Control Information */
- int gep; /* SIA GEP Data */
-};
-
-/*
-** Define the know universe of PHY devices that can be
-** recognised by this driver.
-*/
-static struct phy_table phy_info[] = {
- {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
- {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
- {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
- {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
- {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
-};
-
-/*
-** These GENERIC values assumes that the PHY devices follow 802.3u and
-** allow parallel detection to set the link partner ability register.
-** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
-*/
-#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
-#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
-#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
-
-/*
-** Define special SROM detection cases
-*/
-static c_char enet_det[][ETH_ALEN] = {
- {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
-};
-
-#define SMC 1
-#define ACCTON 2
-
-/*
-** SROM Repair definitions. If a broken SROM is detected a card may
-** use this information to help figure out what to do. This is a
-** "stab in the dark" and so far for SMC9332's only.
-*/
-static c_char srom_repair_info[][100] = {
- {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
- 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
- 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
- 0x00,0x18,}
-};
-
-
-#ifdef DE4X5_DEBUG
-static int de4x5_debug = DE4X5_DEBUG;
-#else
-/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
-static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
-#endif
-
-/*
-** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
-** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-**
-** For a compiled in driver, place e.g.
-** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-** here
-*/
-#ifdef DE4X5_PARM
-static char *args = DE4X5_PARM;
-#else
-static char *args;
-#endif
-
-struct parameters {
- bool fdx;
- int autosense;
-};
-
-#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
-
-#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Ethernet Info
-*/
-#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
-#define IEEE802_3_SZ 1518 /* Packet + CRC */
-#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
-#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
-#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
-#define PKT_HDR_LEN 14 /* Addresses and data length info */
-#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
-#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
-
-
-/*
-** EISA bus defines
-*/
-#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
-
-#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
-
-#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
-#define DE4X5_NAME_LENGTH 8
-
-static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
-
-/*
-** Ethernet PROM defines for DC21040
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** PCI Bus defines
-*/
-#define PCI_MAX_BUS_NUM 8
-#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
-#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
-#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
-#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
-#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
-#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
-
-#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
-#define DE4X5_CACHE_ALIGN CAL_16LONG
-#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
-/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
-#define DESC_ALIGN
-
-#ifndef DEC_ONLY /* See README.de4x5 for using this */
-static int dec_only;
-#else
-static int dec_only = 1;
-#endif
-
-/*
-** DE4X5 IRQ ENABLE/DISABLE
-*/
-#define ENABLE_IRQs { \
- imr |= lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Disable the IRQs */\
-}
-
-#define UNMASK_IRQs {\
- imr |= lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
-}
-
-#define MASK_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Mask the IRQs */\
-}
-
-/*
-** DE4X5 START/STOP
-*/
-#define START_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr |= OMR_ST | OMR_SR;\
- outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr &= ~(OMR_ST|OMR_SR);\
- outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
-}
-
-/*
-** DE4X5 SIA RESET
-*/
-#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
-
-/*
-** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
-*/
-#define DE4X5_AUTOSENSE_MS 250
-
-/*
-** SROM Structure
-*/
-struct de4x5_srom {
- char sub_vendor_id[2];
- char sub_system_id[2];
- char reserved[12];
- char id_block_crc;
- char reserved2;
- char version;
- char num_controllers;
- char ieee_addr[6];
- char info[100];
- short chksum;
-};
-#define SUB_VENDOR_ID 0x500a
-
-/*
-** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
-** and have sizes of both a power of 2 and a multiple of 4.
-** A size of 256 bytes for each buffer could be chosen because over 90% of
-** all packets in our network are <256 bytes long and 64 longword alignment
-** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
-** descriptors are needed for machines with an ALPHA CPU.
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 32 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
- /* Multiple of 4 for DC21040 */
- /* Allows 512 byte alignment */
-struct de4x5_desc {
- volatile __le32 status;
- __le32 des1;
- __le32 buf;
- __le32 next;
- DESC_ALIGN
-};
-
-/*
-** The DE4X5 private structure
-*/
-#define DE4X5_PKT_STAT_SZ 16
-#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DE4X5_PKT_STAT_SZ */
-
-struct pkt_stats {
- u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
- u_int unicast;
- u_int multicast;
- u_int broadcast;
- u_int excessive_collisions;
- u_int tx_underruns;
- u_int excessive_underruns;
- u_int rx_runt_frames;
- u_int rx_collision;
- u_int rx_dribble;
- u_int rx_overflow;
-};
-
-struct de4x5_private {
- char adapter_name[80]; /* Adapter name */
- u_long interrupt; /* Aligned ISR flag */
- struct de4x5_desc *rx_ring; /* RX descriptor ring */
- struct de4x5_desc *tx_ring; /* TX descriptor ring */
- struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
- struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
- int rx_new, rx_old; /* RX descriptor ring pointers */
- int tx_new, tx_old; /* TX descriptor ring pointers */
- char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
- char frame[64]; /* Min sized packet for loopback*/
- spinlock_t lock; /* Adapter specific spinlock */
- struct net_device_stats stats; /* Public stats */
- struct pkt_stats pktStats; /* Private stats counters */
- char rxRingSize;
- char txRingSize;
- int bus; /* EISA or PCI */
- int bus_num; /* PCI Bus number */
- int device; /* Device number on PCI bus */
- int state; /* Adapter OPENED or CLOSED */
- int chipset; /* DC21040, DC21041 or DC21140 */
- s32 irq_mask; /* Interrupt Mask (Enable) bits */
- s32 irq_en; /* Summary interrupt bits */
- int media; /* Media (eg TP), mode (eg 100B)*/
- int c_media; /* Remember the last media conn */
- bool fdx; /* media full duplex flag */
- int linkOK; /* Link is OK */
- int autosense; /* Allow/disallow autosensing */
- bool tx_enable; /* Enable descriptor polling */
- int setup_f; /* Setup frame filtering type */
- int local_state; /* State within a 'media' state */
- struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
- struct sia_phy sia; /* SIA PHY Information */
- int active; /* Index to active PHY device */
- int mii_cnt; /* Number of attached PHY's */
- int timeout; /* Scheduling counter */
- struct timer_list timer; /* Timer info for kernel */
- int tmp; /* Temporary global per card */
- struct {
- u_long lock; /* Lock the cache accesses */
- s32 csr0; /* Saved Bus Mode Register */
- s32 csr6; /* Saved Operating Mode Reg. */
- s32 csr7; /* Saved IRQ Mask Register */
- s32 gep; /* Saved General Purpose Reg. */
- s32 gepc; /* Control info for GEP */
- s32 csr13; /* Saved SIA Connectivity Reg. */
- s32 csr14; /* Saved SIA TX/RX Register */
- s32 csr15; /* Saved SIA General Register */
- int save_cnt; /* Flag if state already saved */
- struct sk_buff_head queue; /* Save the (re-ordered) skb's */
- } cache;
- struct de4x5_srom srom; /* A copy of the SROM */
- int cfrv; /* Card CFRV copy */
- int rx_ovf; /* Check for 'RX overflow' tag */
- bool useSROM; /* For non-DEC card use SROM */
- bool useMII; /* Infoblock using the MII */
- int asBitValid; /* Autosense bits in GEP? */
- int asPolarity; /* 0 => asserted high */
- int asBit; /* Autosense bit number in GEP */
- int defMedium; /* SROM default medium */
- int tcount; /* Last infoblock number */
- int infoblock_init; /* Initialised this infoblock? */
- int infoleaf_offset; /* SROM infoleaf for controller */
- s32 infoblock_csr6; /* csr6 value in SROM infoblock */
- int infoblock_media; /* infoblock media */
- int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
- u_char *rst; /* Pointer to Type 5 reset info */
- u_char ibn; /* Infoblock number */
- struct parameters params; /* Command line/ #defined params */
- struct device *gendev; /* Generic device */
- dma_addr_t dma_rings; /* DMA handle for rings */
- int dma_size; /* Size of the DMA area */
- char *rx_bufs; /* rx bufs on alpha, sparc, ... */
-};
-
-/*
-** To get around certain poxy cards that don't provide an SROM
-** for the second and more DECchip, I have to key off the first
-** chip's address. I'll assume there's not a bad SROM iff:
-**
-** o the chipset is the same
-** o the bus number is the same and > 0
-** o the sum of all the returned hw address bytes is 0 or 0x5fa
-**
-** Also have to save the irq for those cards whose hardware designers
-** can't follow the PCI to PCI Bridge Architecture spec.
-*/
-static struct {
- int chipset;
- int bus;
- int irq;
- u_char addr[ETH_ALEN];
-} last = {0,};
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingSize-lp->tx_new-1:\
- lp->tx_old -lp->tx_new-1)
-
-#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
-
-/*
-** Public Functions
-*/
-static int de4x5_open(struct net_device *dev);
-static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
-static int de4x5_close(struct net_device *dev);
-static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
-static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
-static void set_multicast_list(struct net_device *dev);
-static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
-
-/*
-** Private functions
-*/
-static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
-static int de4x5_init(struct net_device *dev);
-static int de4x5_sw_reset(struct net_device *dev);
-static int de4x5_rx(struct net_device *dev);
-static int de4x5_tx(struct net_device *dev);
-static void de4x5_ast(struct timer_list *t);
-static int de4x5_txur(struct net_device *dev);
-static int de4x5_rx_ovfc(struct net_device *dev);
-
-static int autoconf_media(struct net_device *dev);
-static void create_packet(struct net_device *dev, char *frame, int len);
-static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
-static int dc21040_autoconf(struct net_device *dev);
-static int dc21041_autoconf(struct net_device *dev);
-static int dc21140m_autoconf(struct net_device *dev);
-static int dc2114x_autoconf(struct net_device *dev);
-static int srom_autoconf(struct net_device *dev);
-static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
-static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
-static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
-static int test_for_100Mb(struct net_device *dev, int msec);
-static int wait_for_link(struct net_device *dev);
-static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
-static int is_spd_100(struct net_device *dev);
-static int is_100_up(struct net_device *dev);
-static int is_10_up(struct net_device *dev);
-static int is_anc_capable(struct net_device *dev);
-static int ping_media(struct net_device *dev, int msec);
-static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
-static void de4x5_free_rx_buffs(struct net_device *dev);
-static void de4x5_free_tx_buffs(struct net_device *dev);
-static void de4x5_save_skbs(struct net_device *dev);
-static void de4x5_rst_desc_ring(struct net_device *dev);
-static void de4x5_cache_state(struct net_device *dev, int flag);
-static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
-static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
-static struct sk_buff *de4x5_get_cache(struct net_device *dev);
-static void de4x5_setup_intr(struct net_device *dev);
-static void de4x5_init_connection(struct net_device *dev);
-static int de4x5_reset_phy(struct net_device *dev);
-static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
-static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
-static int test_tp(struct net_device *dev, s32 msec);
-static int EISA_signature(char *name, struct device *device);
-static void PCI_signature(char *name, struct de4x5_private *lp);
-static void DevicePresent(struct net_device *dev, u_long iobase);
-static void enet_addr_rst(u_long aprom_addr);
-static int de4x5_bad_srom(struct de4x5_private *lp);
-static short srom_rd(u_long address, u_char offset);
-static void srom_latch(u_int command, u_long address);
-static void srom_command(u_int command, u_long address);
-static void srom_address(u_int command, u_long address, u_char offset);
-static short srom_data(u_int command, u_long address);
-/*static void srom_busy(u_int command, u_long address);*/
-static void sendto_srom(u_int command, u_long addr);
-static int getfrom_srom(u_long addr);
-static int srom_map_media(struct net_device *dev);
-static int srom_infoleaf_info(struct net_device *dev);
-static void srom_init(struct net_device *dev);
-static void srom_exec(struct net_device *dev, u_char *p);
-static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
-static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
-static int mii_rdata(u_long ioaddr);
-static void mii_wdata(int data, int len, u_long ioaddr);
-static void mii_ta(u_long rw, u_long ioaddr);
-static int mii_swap(int data, int len);
-static void mii_address(u_char addr, u_long ioaddr);
-static void sendto_mii(u32 command, int data, u_long ioaddr);
-static int getfrom_mii(u32 command, u_long ioaddr);
-static int mii_get_oui(u_char phyaddr, u_long ioaddr);
-static int mii_get_phy(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int get_hw_addr(struct net_device *dev);
-static void srom_repair(struct net_device *dev, int card);
-static int test_bad_enet(struct net_device *dev, int status);
-static int an_exception(struct de4x5_private *lp);
-static char *build_setup_frame(struct net_device *dev, int mode);
-static void disable_ast(struct net_device *dev);
-static long de4x5_switch_mac_port(struct net_device *dev);
-static int gep_rd(struct net_device *dev);
-static void gep_wr(s32 data, struct net_device *dev);
-static void yawn(struct net_device *dev, int state);
-static void de4x5_parse_params(struct net_device *dev);
-static void de4x5_dbg_open(struct net_device *dev);
-static void de4x5_dbg_mii(struct net_device *dev, int k);
-static void de4x5_dbg_media(struct net_device *dev);
-static void de4x5_dbg_srom(struct de4x5_srom *p);
-static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int dc21041_infoleaf(struct net_device *dev);
-static int dc21140_infoleaf(struct net_device *dev);
-static int dc21142_infoleaf(struct net_device *dev);
-static int dc21143_infoleaf(struct net_device *dev);
-static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
-
-/*
-** Note that module autoprobing is now allowed under EISA and PCI. The
-** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
-** to "do the right thing".
-*/
-
-static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
-
-module_param_hw(io, int, ioport, 0);
-module_param(de4x5_debug, int, 0);
-module_param(dec_only, int, 0);
-module_param(args, charp, 0);
-
-MODULE_PARM_DESC(io, "de4x5 I/O base address");
-MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
-MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
-MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
-MODULE_LICENSE("GPL");
-
-/*
-** List the SROM infoleaf functions and chipsets
-*/
-struct InfoLeaf {
- int chipset;
- int (*fn)(struct net_device *);
-};
-static struct InfoLeaf infoleaf_array[] = {
- {DC21041, dc21041_infoleaf},
- {DC21140, dc21140_infoleaf},
- {DC21142, dc21142_infoleaf},
- {DC21143, dc21143_infoleaf}
-};
-#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
-
-/*
-** List the SROM info block functions
-*/
-static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
- type0_infoblock,
- type1_infoblock,
- type2_infoblock,
- type3_infoblock,
- type4_infoblock,
- type5_infoblock,
- compact_infoblock
-};
-
-#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
-
-/*
-** Miscellaneous defines...
-*/
-#define RESET_DE4X5 {\
- int i;\
- i=inl(DE4X5_BMR);\
- mdelay(1);\
- outl(i | BMR_SWR, DE4X5_BMR);\
- mdelay(1);\
- outl(i, DE4X5_BMR);\
- mdelay(1);\
- for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
- mdelay(1);\
-}
-
-#define PHY_HARD_RESET {\
- outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
- mdelay(1); /* Assert for 1ms */\
- outl(0x00, DE4X5_GEP);\
- mdelay(2); /* Wait for 2ms */\
-}
-
-static const struct net_device_ops de4x5_netdev_ops = {
- .ndo_open = de4x5_open,
- .ndo_stop = de4x5_close,
- .ndo_start_xmit = de4x5_queue_pkt,
- .ndo_get_stats = de4x5_get_stats,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_siocdevprivate = de4x5_siocdevprivate,
- .ndo_set_mac_address= eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static int
-de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
-{
- char name[DE4X5_NAME_LENGTH + 1];
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *pdev = NULL;
- int i, status=0;
-
- dev_set_drvdata(gendev, dev);
-
- /* Ensure we're not sleeping */
- if (lp->bus == EISA) {
- outb(WAKEUP, PCI_CFPM);
- } else {
- pdev = to_pci_dev (gendev);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- }
- mdelay(10);
-
- RESET_DE4X5;
-
- if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
- return -ENXIO; /* Hardware could not reset */
- }
-
- /*
- ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
- */
- lp->useSROM = false;
- if (lp->bus == PCI) {
- PCI_signature(name, lp);
- } else {
- EISA_signature(name, gendev);
- }
-
- if (*name == '\0') { /* Not found a board signature */
- return -ENXIO;
- }
-
- dev->base_addr = iobase;
- printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
-
- status = get_hw_addr(dev);
- printk(", h/w address %pM\n", dev->dev_addr);
-
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- } else {
- skb_queue_head_init(&lp->cache.queue);
- lp->cache.gepc = GEP_INIT;
- lp->asBit = GEP_SLNK;
- lp->asPolarity = GEP_SLNK;
- lp->asBitValid = ~0;
- lp->timeout = -1;
- lp->gendev = gendev;
- spin_lock_init(&lp->lock);
- timer_setup(&lp->timer, de4x5_ast, 0);
- de4x5_parse_params(dev);
-
- /*
- ** Choose correct autosensing in case someone messed up
- */
- lp->autosense = lp->params.autosense;
- if (lp->chipset != DC21140) {
- if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
- lp->params.autosense = TP;
- }
- if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
- lp->params.autosense = BNC;
- }
- }
- lp->fdx = lp->params.fdx;
- sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
-
- lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
-#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
- lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
-#endif
- lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
- &lp->dma_rings, GFP_ATOMIC);
- if (lp->rx_ring == NULL) {
- return -ENOMEM;
- }
-
- lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
- /*
- ** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
- */
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf = 0;
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
-#else
- {
- dma_addr_t dma_rx_bufs;
-
- dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
- * sizeof(struct de4x5_desc);
- dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
- lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
- + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf =
- cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
- }
-#endif
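-
- /*
- ** Recap of the single coherent allocation above: NUM_RX_DESC receive
- ** descriptors, then NUM_TX_DESC transmit descriptors, then - only on
- ** the memcpy architectures selected above - an aligned block of
- ** NUM_RX_DESC receive buffers of RX_BUFF_SZ bytes each.
- */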
-
- barrier();
-
- lp->rxRingSize = NUM_RX_DESC;
- lp->txRingSize = NUM_TX_DESC;
-
- /* Write the end of list marker to the descriptor lists */
- lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
- lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
-
- /* Tell the adapter where the TX/RX rings are located. */
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- /* Initialise the IRQ mask and Enable/Disable */
- lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
- lp->irq_en = IMR_NIM | IMR_AIM;
-
- /* Create a loopback packet frame for later media probing */
- create_packet(dev, lp->frame, sizeof(lp->frame));
-
- /* Check if the RX overflow bug needs testing for */
- i = lp->cfrv & 0x000000fe;
- if ((lp->chipset == DC21140) && (i == 0x20)) {
- lp->rx_ovf = 1;
- }
-
- /* Initialise the SROM pointers if possible */
- if (lp->useSROM) {
- lp->state = INITIALISED;
- if (srom_infoleaf_info(dev)) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return -ENXIO;
- }
- srom_init(dev);
- }
-
- lp->state = CLOSED;
-
- /*
- ** Check for an MII interface
- */
- if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
- mii_get_phy(dev);
- }
-
- printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
- ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
- }
-
- if (de4x5_debug & DEBUG_VERSION) {
- printk(version);
- }
-
- /* The DE4X5-specific entries in the device structure. */
- SET_NETDEV_DEV(dev, gendev);
- dev->netdev_ops = &de4x5_netdev_ops;
- dev->mem_start = 0;
-
- /* Fill in the generic fields of the device structure. */
- if ((status = register_netdev (dev))) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return status;
- }
-
- /* Let the adapter sleep to save power */
- yawn(dev, SLEEP);
-
- return status;
-}
-
-
-static int
-de4x5_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- s32 omr;
-
- /* Allocate the RX buffers */
- for (i=0; i<lp->rxRingSize; i++) {
- if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
- de4x5_free_rx_buffs(dev);
- return -EAGAIN;
- }
- }
-
- /*
- ** Wake up the adapter
- */
- yawn(dev, WAKEUP);
-
- /*
- ** Re-initialize the DE4X5...
- */
- status = de4x5_init(dev);
- spin_lock_init(&lp->lock);
- lp->state = OPEN;
- de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("\n Cannot get IRQ- reconfigure your hardware.\n");
- disable_ast(dev);
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
- yawn(dev, SLEEP);
- lp->state = CLOSED;
- return -EAGAIN;
- } else {
- printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
- printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
- }
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- netif_trans_update(dev); /* prevent tx timeout */
-
- START_DE4X5;
-
- de4x5_setup_intr(dev);
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
- printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
- printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
- printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
- printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
- printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
- printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
- printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
- }
-
- return status;
-}
-
-/*
-** Initialize the DE4X5 operating conditions. NB: a chip problem with the
-** DC21140 requires using perfect filtering mode for that chip. Since I can't
-** see why I'd want > 14 multicast addresses, I have changed all chips to use
-** the perfect filtering mode. Keep the DMA burst length at 8: there seem
-** to be data corruption problems if it is larger (UDP errors seen from a
-** ttcp source).
-*/
-static int
-de4x5_init(struct net_device *dev)
-{
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- de4x5_sw_reset(dev);
-
- /* Autoconfigure the connected port */
- autoconf_media(dev);
-
- return 0;
-}
-
-static int
-de4x5_sw_reset(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 bmr, omr;
-
- /* Select the MII or SRL port now and RESET the MAC */
- if (!lp->useSROM) {
- if (lp->phy[lp->active].id != 0) {
- lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
- } else {
- lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
- }
- de4x5_switch_mac_port(dev);
- }
-
- /*
- ** Set the programmable burst length to 8 longwords for all the DC21140
- ** Fasternet chips and 4 longwords for all others: DMA errors result
- ** without these values. Cache align to 16 longwords.
- */
- bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
- bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
- outl(bmr, DE4X5_BMR);
-
- omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
- if (lp->chipset == DC21140) {
- omr |= (OMR_SDP | OMR_SB);
- }
- lp->setup_f = PERFECT;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
-
- /* Build the setup frame depending on filtering mode */
- SetMulticastFilter(dev);
-
- load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
- outl(omr|OMR_ST, DE4X5_OMR);
-
- /* Poll for setup frame completion (adapter interrupts are disabled now) */
-
- for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
- mdelay(1);
- if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
- }
- outl(omr, DE4X5_OMR); /* Stop everything! */
-
- if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
- inl(DE4X5_STS));
- status = -EIO;
- }
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- lp->tx_old = lp->tx_new;
-
- return status;
-}
-
-/*
-** Writes a socket buffer address to the next available transmit descriptor.
-*/
-static netdev_tx_t
-de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- netif_stop_queue(dev);
- if (!lp->tx_enable) /* Cannot send for now */
- goto tx_err;
-
- /*
- ** Clean out the TX ring asynchronously to interrupts - sometimes the
- ** interrupts are lost by delayed descriptor status updates relative to
- ** the irq assertion, especially with a busy PCI bus.
- */
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_tx(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- goto tx_err;
-
- /* Transmit descriptor ring full or stale skb */
- if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
- if (lp->interrupt) {
- de4x5_putb_cache(dev, skb); /* Requeue the buffer */
- } else {
- de4x5_put_cache(dev, skb);
- }
- if (de4x5_debug & DEBUG_TX) {
- printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
- }
- } else if (skb->len > 0) {
- /* If we already have stuff queued locally, use that first */
- if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
- de4x5_put_cache(dev, skb);
- skb = de4x5_get_cache(dev);
- }
-
- while (skb && !netif_queue_stopped(dev) &&
- (u_long) lp->tx_skb[lp->tx_new] <= 1) {
- spin_lock_irqsave(&lp->lock, flags);
- netif_stop_queue(dev);
- load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
- lp->stats.tx_bytes += skb->len;
- outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
-
- if (TX_BUFFS_AVAIL) {
- netif_start_queue(dev); /* Another pkt may be queued */
- }
- skb = de4x5_get_cache(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- if (skb) de4x5_putb_cache(dev, skb);
- }
-
- lp->cache.lock = 0;
-
- return NETDEV_TX_OK;
-tx_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
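-
-/*
-** Recap of the caching above: when the ring is full, an skb is stale, or
-** we are inside the interrupt handler, the skb is parked on the local
-** cache queue (head for a requeue, tail otherwise) and replayed later,
-** either by a subsequent transmit or by the interrupt handler once
-** descriptors free up.
-*/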
-
-/*
-** The DE4X5 interrupt handler.
-**
-** I/O Read/Writes through intermediate PCI bridges are never 'posted',
-** so that the asserted interrupt always has some real data to work with -
-** if these I/O accesses are ever changed to memory accesses, ensure the
-** STS write is read immediately to complete the transaction if the adapter
-** is not on bus 0. Lost interrupts can still occur when the PCI bus load
-** is high and descriptor status bits cannot be set before the associated
-** interrupt is asserted and this routine entered.
-*/
-static irqreturn_t
-de4x5_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct de4x5_private *lp;
- s32 imr, omr, sts, limit;
- u_long iobase;
- unsigned int handled = 0;
-
- lp = netdev_priv(dev);
- spin_lock(&lp->lock);
- iobase = dev->base_addr;
-
- DISABLE_IRQs; /* Ensure non re-entrancy */
-
- if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
- printk("%s: Re-entering the interrupt handler.\n", dev->name);
-
- synchronize_irq(dev->irq);
-
- for (limit=0; limit<8; limit++) {
- sts = inl(DE4X5_STS); /* Read IRQ status */
- outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
- if (!(sts & lp->irq_mask)) break;/* All done */
- handled = 1;
-
- if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
- de4x5_rx(dev);
-
- if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
- if (sts & STS_LNF) { /* TP Link has failed */
- lp->irq_mask &= ~IMR_LFM;
- }
-
- if (sts & STS_UNF) { /* Transmit underrun */
- de4x5_txur(dev);
- }
-
- if (sts & STS_SE) { /* Bus Error */
- STOP_DE4X5;
- printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
- dev->name, sts);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
- }
- }
-
- /* Load the TX ring with any locally stored packets */
- if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
- while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
- de4x5_queue_pkt(de4x5_get_cache(dev), dev);
- }
- lp->cache.lock = 0;
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- ENABLE_IRQs;
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
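-
-/*
-** Note on the service loop above: interrupts are acknowledged by writing
-** the status bits straight back to the STS register, and the loop is
-** bounded at 8 passes so a stuck status bit cannot livelock the handler.
-*/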
-
-static int
-de4x5_rx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
- entry=lp->rx_new) {
- status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
- if (lp->rx_ovf) {
- if (inl(DE4X5_MFC) & MFC_FOCM) {
- de4x5_rx_ovfc(dev);
- break;
- }
- }
-
- if (status & RD_FS) { /* Remember the start of frame */
- lp->rx_old = entry;
- }
-
- if (status & RD_LS) { /* Valid frame status */
- if (lp->tx_enable) lp->linkOK++;
- if (status & RD_ES) { /* There was an error. */
- lp->stats.rx_errors++; /* Update the error stats. */
- if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
- if (status & RD_CE) lp->stats.rx_crc_errors++;
- if (status & RD_OF) lp->stats.rx_fifo_errors++;
- if (status & RD_TL) lp->stats.rx_length_errors++;
- if (status & RD_RF) lp->pktStats.rx_runt_frames++;
- if (status & RD_CS) lp->pktStats.rx_collision++;
- if (status & RD_DB) lp->pktStats.rx_dribble++;
- if (status & RD_OF) lp->pktStats.rx_overflow++;
- } else { /* A valid frame received */
- struct sk_buff *skb;
- short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
- >> 16) - 4;
-
- if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- } else {
- de4x5_dbg_rx(skb, pkt_len);
-
- /* Push up the protocol stack */
- skb->protocol=eth_type_trans(skb,dev);
- de4x5_local_stats(dev, skb->data, pkt_len);
- netif_rx(skb);
-
- /* Update stats */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- }
-
- /* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
- lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
- barrier();
- }
- lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
- barrier();
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- return 0;
-}
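-
-/*
-** Note on the ownership hand-back above: a frame may span several
-** descriptors, with RD_FS marking the first (remembered in rx_old) and
-** RD_LS the last; every descriptor from rx_old up to and including the
-** RD_LS one is returned to the adapter in a single sweep.
-*/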
-
-static inline void
-de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
-{
- dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
- le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
- DMA_TO_DEVICE);
- if ((u_long) lp->tx_skb[entry] > 1)
- dev_kfree_skb_irq(lp->tx_skb[entry]);
- lp->tx_skb[entry] = NULL;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-*/
-static int
-de4x5_tx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
- if (status < 0) { /* Buffer not sent yet */
- break;
- } else if (status != 0x7fffffff) { /* Not setup frame */
- if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
- if (status & TD_NC) lp->stats.tx_carrier_errors++;
- if (status & TD_LC) lp->stats.tx_window_errors++;
- if (status & TD_UF) lp->stats.tx_fifo_errors++;
- if (status & TD_EC) lp->pktStats.excessive_collisions++;
- if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
- if (TX_PKT_PENDING) {
- outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
- }
- } else { /* Packet sent */
- lp->stats.tx_packets++;
- if (lp->tx_enable) lp->linkOK++;
- }
- /* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
- ((status & TD_CC) >> 3));
-
- /* Free the buffer. */
- if (lp->tx_skb[entry] != NULL)
- de4x5_free_tx_buff(lp, entry);
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
- }
-
- /* Any resources available? */
- if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
- if (lp->interrupt)
- netif_wake_queue(dev);
- else
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-static void
-de4x5_ast(struct timer_list *t)
-{
- struct de4x5_private *lp = from_timer(lp, t, timer);
- struct net_device *dev = dev_get_drvdata(lp->gendev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int dt;
-
- if (lp->useSROM)
- next_tick = srom_autoconf(dev);
- else if (lp->chipset == DC21140)
- next_tick = dc21140m_autoconf(dev);
- else if (lp->chipset == DC21041)
- next_tick = dc21041_autoconf(dev);
- else if (lp->chipset == DC21040)
- next_tick = dc21040_autoconf(dev);
- lp->linkOK = 0;
-
- dt = (next_tick * HZ) / 1000;
-
- if (!dt)
- dt = 1;
-
- mod_timer(&lp->timer, jiffies + dt);
-}
-
-static int
-de4x5_txur(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
- omr &= ~(OMR_ST|OMR_SR);
- outl(omr, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_TS);
- if ((omr & OMR_TR) < OMR_TR) {
- omr += 0x4000;
- } else {
- omr |= OMR_SF;
- }
- outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
- }
-
- return 0;
-}
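-
-/*
-** A standalone sketch (not from the original sources) of the escalation
-** above, with the register masks passed in rather than assumed: each
-** underrun raises the TX FIFO threshold (TR) one step, and once TR
-** saturates the chip is switched to store-and-forward (SF) instead.
-*/
-static int next_tx_threshold_sketch(int omr, int omr_tr, int omr_sf)
-{
- if ((omr & omr_tr) < omr_tr)
- return omr + 0x4000; /* next TR step, as in de4x5_txur() above */
- return omr | omr_sf; /* TR saturated: fall back to store-and-forward */
-}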
-
-static int
-de4x5_rx_ovfc(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- outl(omr & ~OMR_SR, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_RS);
-
- for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
- lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- outl(omr, DE4X5_OMR);
-
- return 0;
-}
-
-static int
-de4x5_close(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, omr;
-
- disable_ast(dev);
-
- netif_stop_queue(dev);
-
- if (de4x5_debug & DEBUG_CLOSE) {
- printk("%s: Shutting down ethercard, status was %8.8x.\n",
- dev->name, inl(DE4X5_STS));
- }
-
- /*
- ** We stop the DE4X5 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
- STOP_DE4X5;
-
- /* Free the associated irq */
- free_irq(dev->irq, dev);
- lp->state = CLOSED;
-
- /* Free any socket buffers */
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
-
- /* Put the adapter to sleep to save power */
- yawn(dev, SLEEP);
-
- return 0;
-}
-
-static struct net_device_stats *
-de4x5_get_stats(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
- return &lp->stats;
-}
-
-static void
-de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
- if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DE4X5_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf, dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
- }
-}
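-
-/*
-** A minimal sketch (not from the original sources) of the binning loop
-** above: a frame is counted in the first bin whose upper bound exceeds
-** its length, while bin 0 separately counts every frame.
-*/
-static int pkt_bin_sketch(int pkt_len, int nbins, int bin_sz)
-{
- int i;
-
- for (i = 1; i < nbins - 1; i++)
- if (pkt_len < i * bin_sz)
- return i; /* e.g. 200 bytes with bin_sz 128 -> bin 2 */
- return 0; /* no bin matched */
-}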
-
-/*
-** Removes the TD_IC flag from the previous descriptor to improve TX
-** performance. If the flag is changed on a descriptor that is being read by
-** the hardware, PCI transaction ordering should mean the change either takes
-** effect or is simply missed. Either way you're modifying a descriptor you
-** don't own, but this shouldn't kill the chip provided the descriptor is
-** read-only to the hardware.
-*/
-static void
-load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
- dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
-
- lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
- lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
- lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
- lp->tx_skb[lp->tx_new] = skb;
- lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
- barrier();
-
- lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
- barrier();
-}
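-
-/*
-** Ordering note for the above: buf, des1 and tx_skb[] are written first
-** and a barrier issued, and only then is T_OWN set (followed by another
-** barrier), so the adapter can never see an owned descriptor with
-** half-written fields.
-*/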
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- /* First, double check that the adapter is open */
- if (lp->state == OPEN) {
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- u32 omr;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PR;
- outl(omr, DE4X5_OMR);
- } else {
- SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_trans_update(dev); /* prevent tx timeout */
- }
- }
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Little endian crc one liner from Matt Thomas, DEC.
-*/
-static void
-SetMulticastFilter(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i, bit, byte;
- u16 hashcode;
- u32 omr, crc;
- char *pa;
- unsigned char *addrs;
-
- omr = inl(DE4X5_OMR);
- omr &= ~(OMR_PR | OMR_PM);
- pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
- if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
- omr |= OMR_PM; /* Pass all multicasts */
- } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
- byte <<= 1; /* calc offset into setup frame */
- if (byte & 0x02) {
- byte -= 1;
- }
- lp->setup_frame[byte] |= bit;
- }
- } else { /* Perfect filtering */
- netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- for (i=0; i<ETH_ALEN; i++) {
- *(pa + (i&1)) = *addrs++;
- if (i & 0x01) pa += 4;
- }
- }
- }
- outl(omr, DE4X5_OMR);
-}
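-
-/*
-** A standalone sketch (not from the original sources) of the hash path
-** above: the standard little-endian Ethernet CRC-32, whose 9 least
-** significant bits index one of 512 filter bits (this assumes
-** DE4X5_HASH_BITS == 0x01ff, per the "9 LSb of CRC" comment).
-*/
-static unsigned int crc32_le_sketch(const unsigned char *p, int len)
-{
- unsigned int crc = 0xffffffff;
-
- while (len--) {
- unsigned int c = *p++;
- int bit;
-
- for (bit = 0; bit < 8; bit++) {
- crc = (crc >> 1) ^ (((crc ^ c) & 1) ? 0xedb88320 : 0);
- c >>= 1;
- }
- }
- return crc; /* then: hashcode = crc & 0x1ff; byte = hashcode >> 3 */
-}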
-
-#ifdef CONFIG_EISA
-
-static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-
-static int de4x5_eisa_probe(struct device *gendev)
-{
- struct eisa_device *edev;
- u_long iobase;
- u_char irq, regval;
- u_short vendor;
- u32 cfid;
- int status, device;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- edev = to_eisa_device (gendev);
- iobase = edev->base_addr;
-
- if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
- return -EBUSY;
-
- if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
- DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
- status = -EBUSY;
- goto release_reg_1;
- }
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- status = -ENOMEM;
- goto release_reg_2;
- }
- lp = netdev_priv(dev);
-
- cfid = (u32) inl(PCI_CFID);
- lp->cfrv = (u_short) inl(PCI_CFRV);
- device = (cfid >> 8) & 0x00ffff00;
- vendor = (u_short) cfid;
-
- /* Read the EISA Configuration Registers */
- regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
-#ifdef CONFIG_ALPHA
- /* Looks like the Jensen firmware (rev 2.2) doesn't really
- * care about the EISA configuration, and thus doesn't
- * configure the PLX bridge properly. Oh well... Simply mimic
- * the EISA config file to sort it out. */
-
- /* EISA REG1: Assert DecChip 21040 HW Reset */
- outb (ER1_IAM | 1, EISA_REG1);
- mdelay (1);
-
- /* EISA REG1: Deassert DecChip 21040 HW Reset */
- outb (ER1_IAM, EISA_REG1);
- mdelay (1);
-
- /* EISA REG3: R/W Burst Transfer Enable */
- outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
- /* 32-bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
- outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
-#endif
- irq = de4x5_irq[(regval >> 1) & 0x03];
-
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
- lp->bus = EISA;
-
- /* Write the PCI Configuration Registers */
- outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
- outl(0x00006000, PCI_CFLT);
- outl(iobase, PCI_CBIO);
-
- DevicePresent(dev, EISA_APROM);
-
- dev->irq = irq;
-
- if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
- return 0;
- }
-
- free_netdev (dev);
- release_reg_2:
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_reg_1:
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return status;
-}
-
-static int de4x5_eisa_remove(struct device *device)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = dev_get_drvdata(device);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return 0;
-}
-
-static const struct eisa_device_id de4x5_eisa_ids[] = {
- { "DEC4250", 0 }, /* 0 is the board name index... */
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
-
-static struct eisa_driver de4x5_eisa_driver = {
- .id_table = de4x5_eisa_ids,
- .driver = {
- .name = "de4x5",
- .probe = de4x5_eisa_probe,
- .remove = de4x5_eisa_remove,
- }
-};
-#endif
-
-#ifdef CONFIG_PCI
-
-/*
-** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
-** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
-** For single port cards this is a time waster...
-*/
-static void
-srom_search(struct net_device *dev, struct pci_dev *pdev)
-{
- u_char pb;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int i, j;
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *this_dev;
-
- list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
- vendor = this_dev->vendor;
- device = this_dev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
-
- /* Get the chip configuration revision register */
- pb = this_dev->bus->number;
-
- /* Set the device number information */
- lp->device = PCI_SLOT(this_dev->devfn);
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
- ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(this_dev, 0);
-
- /* Fetch the IRQ to be used */
- irq = this_dev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
- /* Check if I/O accesses are enabled */
- pci_read_config_word(this_dev, PCI_COMMAND, &status);
- if (!(status & PCI_COMMAND_IO)) continue;
-
- /* Search for a valid SROM attached to this DECchip */
- DevicePresent(dev, DE4X5_APROM);
- for (j=0, i=0; i<ETH_ALEN; i++) {
- j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
- }
- if (j != 0 && j != 6 * 0xff) {
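- /* j is the byte sum of the MAC address: 0 only if all bytes are zero, 6*0xff only if all are 0xff */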
- last.chipset = device;
- last.bus = pb;
- last.irq = irq;
- for (i=0; i<ETH_ALEN; i++) {
- last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
- }
- return;
- }
- }
-}
-
-/*
-** PCI bus I/O device probe
-** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
-** the driver. Some pre-V2.1 PCI BIOSes need the slot and its features to be
-** enabled by the user first in the setup utility. Hence we just check for
-** enabled features and silently ignore the card if they're not.
-**
-** STOP PRESS: Some BIOSes __require__ the driver to enable the bus mastering
-** bit. Here, check for I/O accesses and then set BM. If you put the card in
-** a non-BM slot, you're on your own (and complain to the PC vendor that your
-** PC doesn't conform to the PCI standard)!
-**
-** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
-** kernels use the V0.535[n] drivers.
-*/
-
-static int de4x5_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u_char pb, pbus = 0, dev_num, dnum = 0, timer;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int error;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- dev_num = PCI_SLOT(pdev->devfn);
- pb = pdev->bus->number;
-
- if (io) { /* probe a single PCI device */
- pbus = (u_short)(io >> 8);
- dnum = (u_short)(io & 0xff);
- if ((pbus != pb) || (dnum != dev_num))
- return -ENODEV;
- }
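-
- /*
- ** A minimal illustration (not from the original sources) of the "io"
- ** encoding unpacked above: the module parameter packs PCI bus and slot
- ** as (bus << 8) | slot, e.g. bus 1, slot 8 -> io = 0x0108.
- */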
-
- vendor = pdev->vendor;
- device = pdev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
- return -ENODEV;
-
- /* Ok, the device seems to be for us. */
- if ((error = pci_enable_device (pdev)))
- return error;
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- error = -ENOMEM;
- goto disable_dev;
- }
-
- lp = netdev_priv(dev);
- lp->bus = PCI;
- lp->bus_num = 0;
-
- /* Search for an SROM on this bus */
- if (lp->bus_num != pb) {
- lp->bus_num = pb;
- srom_search(dev, pdev);
- }
-
- /* Get the chip configuration revision register */
- lp->cfrv = pdev->revision;
-
- /* Set the device number information */
- lp->device = dev_num;
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(pdev, 0);
-
- /* Fetch the IRQ to be used */
- irq = pdev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check if I/O accesses and Bus Mastering are enabled */
- pci_read_config_word(pdev, PCI_COMMAND, &status);
-#ifdef __powerpc__
- if (!(status & PCI_COMMAND_IO)) {
- status |= PCI_COMMAND_IO;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
-#endif /* __powerpc__ */
- if (!(status & PCI_COMMAND_IO)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- if (!(status & PCI_COMMAND_MASTER)) {
- status |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
- if (!(status & PCI_COMMAND_MASTER)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check the latency timer for values >= 0x60 */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
- if (timer < 0x60) {
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
- }
-
- DevicePresent(dev, DE4X5_APROM);
-
- if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
- error = -EBUSY;
- goto free_dev;
- }
-
- dev->irq = irq;
-
- if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
- goto release;
- }
-
- return 0;
-
- release:
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- free_dev:
- free_netdev (dev);
- disable_dev:
- pci_disable_device (pdev);
- return error;
-}
-
-static void de4x5_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = pci_get_drvdata(pdev);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- pci_disable_device (pdev);
-}
-
-static const struct pci_device_id de4x5_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { },
-};
-
-static struct pci_driver de4x5_pci_driver = {
- .name = "de4x5",
- .id_table = de4x5_pci_tbl,
- .probe = de4x5_pci_probe,
- .remove = de4x5_pci_remove,
-};
-
-#endif
-
-/*
-** Auto configure the media here rather than setting the port at compile
-** time. This routine is called by de4x5_init() and when a loss of media is
-** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
-** sneaky and changed the port on us.
-*/
-static int
-autoconf_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- disable_ast(dev);
-
- lp->c_media = AUTO; /* Bogus last media */
- inl(DE4X5_MFC); /* Zero the lost frames counter */
- lp->media = INIT;
- lp->tcount = 0;
-
- de4x5_ast(&lp->timer);
-
- return lp->media;
-}
-
-/*
-** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
-** from BNC as the port has a jumper to set thick or thin wire. When set for
-** BNC, the BNC port will indicate activity if it's not terminated correctly.
-** The only way to test for that is to place a loopback packet onto the
-** network and watch for errors. Since we're messing with the interrupt mask
-** register, disable the board interrupts and do not allow any more packets to
-** be queued to the hardware. Re-enable everything only when the media is
-** found.
-** I may have to "age out" locally queued packets so that the higher layer
-** timeouts don't effectively duplicate packets on the network.
-*/
-static int
-dc21040_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- s32 imr;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev);
- if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
- lp->media = TP;
- } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
- lp->media = BNC_AUI;
- } else if (lp->autosense == EXT_SIA) {
- lp->media = EXT_SIA;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21040_autoconf(dev);
- break;
-
- case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
- TP_SUSPECT, test_tp);
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
- break;
-
- case BNC:
- case AUI:
- case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
- BNC_AUI_SUSPECT, ping_media);
- break;
-
- case BNC_AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
- break;
-
- case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
- NC, EXT_SIA_SUSPECT, ping_media);
- break;
-
- case EXT_SIA_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
- break;
-
- case NC:
- /* default to TP for all */
- reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
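-
-/*
-** Autosense fallback order implemented above when autosense == AUTO:
-** TP -> BNC_AUI -> EXT_SIA -> NC, with each medium given a *_SUSPECT
-** grace state before it is abandoned.
-*/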
-
-static int
-dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
- int (*fn)(struct net_device *, int))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 0:
- reset_init_sia(dev, csr13, csr14, csr15);
- lp->local_state++;
- next_tick = 500;
- break;
-
- case 1:
- if (!lp->tx_enable) {
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else {
- if (linkBad && (lp->autosense == AUTO)) {
- lp->local_state = 0;
- lp->media = next_state;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = suspect_state;
- next_tick = 3000;
- }
- break;
- }
-
- return next_tick;
-}
-
-static int
-de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
- int (*fn)(struct net_device *, int),
- int (*asfn)(struct net_device *))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 1:
- if (lp->linkOK) {
- lp->media = prev_state;
- } else {
- lp->local_state++;
- next_tick = asfn(dev);
- }
- break;
-
- case 2:
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else if (!linkBad) {
- lp->local_state--;
- lp->media = prev_state;
- } else {
- lp->media = INIT;
- lp->tcount++;
- }
- }
-
- return next_tick;
-}
-
-/*
-** Autoconfigure the media when using the DC21041. AUI needs to be tested
-** before BNC, because the BNC port will indicate activity if it's not
-** terminated correctly. The only way to test for that is to place a loopback
-** packet onto the network and watch for errors. Since we're messing with
-** the interrupt mask register, disable the board interrupts and do not allow
-** any more packets to be queued to the hardware. Re-enable everything only
-** when the media is found.
-*/
-static int
-dc21041_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, irqs, irq_mask, imr, omr;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
- lp->media = TP; /* On chip auto negotiation is broken */
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21041_autoconf(dev);
- break;
-
- case TP_NW:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts & STS_LNP) {
- lp->media = ANS;
- } else {
- lp->media = AUI;
- }
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case ANS:
- if (!lp->tx_enable) {
- irqs = STS_LNP;
- irq_mask = IMR_LPM;
- sts = test_ans(dev, irqs, irq_mask, 3000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- lp->media = TP;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = ANS_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case ANS_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
- break;
-
- case TP:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- if (inl(DE4X5_SISR) & SISR_NRA) {
- lp->media = AUI; /* Non selected port activity */
- } else {
- lp->media = BNC;
- }
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = TP_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->media = NC;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
- break;
-
- case NC:
- omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
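-
-/*
-** Autosense fallback on the DC21041 when autosense == AUTO, as
-** implemented above: start at TP (the INIT case notes that on-chip
-** autonegotiation is broken), fall back to AUI or BNC depending on
-** non-selected port activity (SISR_NRA), then to NC, again via the
-** *_SUSPECT grace states.
-*/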
-
-/*
-** Some autonegotiation chips are broken in that they do not return the
-** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
-** register, except at the first power up negotiation.
-*/
-static int
-dc21140m_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int ana, anlpa, cap, cr, slnk, sr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- u_long imr, omr, iobase = dev->base_addr;
-
- switch(lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->useSROM) {
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- return next_tick;
- }
- srom_exec(dev, lp->phy[lp->active].gep);
- if (lp->infoblock_media == ANS) {
- ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- }
- } else {
- lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
- SET_10Mb;
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- } else if (lp->autosense == AUTO) {
- lp->media = SPD_DET;
- } else if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = NC;
- }
- }
- lp->local_state = 0;
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case 1:
- if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
-
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc21140m_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
- (~gep_rd(dev) & GEP_LNP));
- SET_100Mb_PDET;
- }
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- next_tick = slnk & ~TIMER_CB;
- } else {
- if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
- lp->media = _10Mb;
- } else {
- lp->media = NC;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case _100Mb: /* Set 100Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case BNC:
- case AUI:
- case _10Mb: /* Set 10Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case NC:
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** This routine may be merged into dc21140m_autoconf() sometime as I'm
-** changing how I figure out the media - but trying to keep it backwards
-** compatible with the de500-xa and de500-aa.
-** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
-** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
-** This routine just has to figure out whether 10Mb/s or 100Mb/s is
-** active.
-** When autonegotiation is working, the ANS part searches the SROM for
-** the highest common speed (TP) link that both ends can run, and whether
-** that link can be full duplex. That infoblock is executed and the link
-** speed is then set.
-**
-** Only _10Mb and _100Mb are tested here.
-*/
-static int
-dc2114x_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if (lp->params.autosense & ~AUTO) {
- srom_map_media(dev); /* Fixed media requested */
- if (lp->media != lp->params.autosense) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- lp->media = INIT;
- }
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- }
- }
- lp->local_state = 0;
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
- if (sr < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc2114x_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->tcount++;
- lp->media = INIT;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- if (lp->media == _100Mb) {
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- lp->media = SPD_DET;
- return slnk & ~TIMER_CB;
- }
- } else {
- if (wait_for_link(dev) < 0) {
- lp->media = SPD_DET;
- return PDET_LINK_WAIT;
- }
- }
- if (lp->media == ANS) { /* Do MII parallel detection */
- if (is_spd_100(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = _10Mb;
- }
- next_tick = dc2114x_autoconf(dev);
- } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
- (((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
- is_10_up(dev))) {
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->tcount++;
- lp->media = INIT;
- }
- break;
-
- case _10Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case _100Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- default:
- lp->tcount++;
-printk("Huh?: media:%02x\n", lp->media);
- lp->media = INIT;
- break;
- }
-
- return next_tick;
-}
-
-static int
-srom_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return lp->infoleaf_fn(dev);
-}
-
-/*
-** This mapping keeps the original media codes and FDX flag unchanged.
-** While it isn't strictly necessary, it helps me for the moment...
-** The early return avoids a media state / SROM media space clash.
-*/
-static int
-srom_map_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- lp->fdx = false;
- if (lp->infoblock_media == lp->media)
- return 0;
-
- switch(lp->infoblock_media) {
- case SROM_10BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_10BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
- lp->media = _10Mb;
- } else {
- lp->media = TP;
- }
- break;
-
- case SROM_10BASE2:
- lp->media = BNC;
- break;
-
- case SROM_10BASE5:
- lp->media = AUI;
- break;
-
- case SROM_100BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case SROM_100BASET4:
- lp->media = _100Mb;
- break;
-
- case SROM_100BASEFF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASEF:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case ANS:
- lp->media = ANS;
- lp->fdx = lp->params.fdx;
- break;
-
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
- lp->infoblock_media);
- return -1;
- }
-
- return 0;
-}
-
-static void
-de4x5_init_connection(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media; /* Stop scrolling media messages */
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_rst_desc_ring(dev);
- de4x5_setup_intr(dev);
- lp->tx_enable = true;
- spin_unlock_irqrestore(&lp->lock, flags);
- outl(POLL_DEMAND, DE4X5_TPD);
-
- netif_wake_queue(dev);
-}
-
-/*
-** General PHY reset function. Some MII devices don't reset correctly
-** since their MII address pins can float at voltages that are dependent
-** on the signal pin use. Do a double reset to ensure a reset.
-*/
-static int
-de4x5_reset_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = 0;
-
- if ((lp->useSROM) || (lp->phy[lp->active].id)) {
- if (lp->timeout < 0) {
- if (lp->useSROM) {
- if (lp->phy[lp->active].rst) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].rst);
- } else if (lp->rst) { /* Type 5 infoblock reset */
- srom_exec(dev, lp->rst);
- srom_exec(dev, lp->rst);
- }
- } else {
- PHY_HARD_RESET;
- }
- if (lp->useMII) {
- mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- }
- if (lp->useMII) {
- next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
- }
- } else if (lp->chipset == DC21140) {
- PHY_HARD_RESET;
- }
-
- return next_tick;
-}
-
-static int
-test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, csr12;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
- reset_init_sia(dev, csr13, csr14, csr15);
- }
-
- /* set up the interrupt mask */
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
-
- /* clear csr12 NRA and SRA bits */
- if ((lp->chipset == DC21041) || lp->useSROM) {
- csr12 = inl(DE4X5_SISR);
- outl(csr12, DE4X5_SISR);
- }
- }
-
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
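-/*
-** Poll the 10Base-T link: returns non-zero while the SIA still reports
-** link fail (SISR_LKF) or a network connection error (SISR_NCR).
-*/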
-static int
-test_tp(struct net_device *dev, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
-
- if (sisr && --lp->timeout) {
- sisr = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** Samples the 100Mb Link State Signal. The sample interval is important
-** because too fast a rate can give erroneous results and confuse the
-** speed sense algorithm.
-*/
-#define SAMPLE_INTERVAL 500 /* ms */
-#define SAMPLE_DELAY 2000 /* ms */
-static int
-test_for_100Mb(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
-
- if (lp->timeout < 0) {
- if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
- if (msec > SAMPLE_DELAY) {
- lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
- gep = SAMPLE_DELAY | TIMER_CB;
- return gep;
- } else {
- lp->timeout = msec/SAMPLE_INTERVAL;
- }
- }
-
- if (lp->phy[lp->active].id || lp->useSROM) {
- gep = is_100_up(dev) | is_spd_100(dev);
- } else {
- gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
- }
- if (!(gep & ret) && --lp->timeout) {
- gep = SAMPLE_INTERVAL | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return gep;
-}
-
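-/*
-** One-shot settling delay: the first call returns TIMER_CB to
-** reschedule, the second returns 0 to indicate the wait is over.
-*/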
-static int
-wait_for_link(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->timeout < 0) {
- lp->timeout = 1;
- }
-
- if (lp->timeout--) {
- return TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return 0;
-}
-
-/*
-** Poll a MII register until the bits in <mask> clear (pol == false) or
-** become set (pol == true), or until the timeout expires.
-*/
-static int
-test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int test;
- u_long iobase = dev->base_addr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
- test = (reg ^ (pol ? ~0 : 0)) & mask;
-
- if (test && --lp->timeout) {
- reg = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return reg;
-}
-
-static int
-is_spd_100(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int spd;
-
- if (lp->useMII) {
- spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
- spd = ~(spd ^ lp->phy[lp->active].spd.value);
- spd &= lp->phy[lp->active].spd.mask;
- } else if (!lp->useSROM) { /* de500-xa */
- spd = ((~gep_rd(dev)) & GEP_SLNK);
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-
- return spd;
-}
-
-static int
-is_100_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_SLNK;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_10_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_LNP;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset & ~0x00ff) == DC2114x) ?
- (~inl(DE4X5_SISR)&SISR_LS10):
- 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_anc_capable(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
- } else {
- return 0;
- }
-}
-
-/*
-** Send a packet onto the media and watch for send errors that indicate the
-** media is bad or unconnected.
-*/
-static int
-ping_media(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
-
- lp->tmp = lp->tx_new; /* Remember the ring position */
- load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD);
- }
-
- sisr = inl(DE4X5_SISR);
-
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
- (--lp->timeout)) {
- sisr = 100 | TIMER_CB;
- } else {
- if ((!(sisr & SISR_NCR)) &&
- !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
- lp->timeout) {
- sisr = 0;
- } else {
- sisr = 1;
- }
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** This function does two things: on Intel-like machines it kmallocs a
-** fresh buffer to replace the one about to be passed up; on Alpha and
-** the other copy-only platforms it kmallocs a buffer into which the
-** packet is copied.
-*/
-static struct sk_buff *
-de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct sk_buff *p;
-
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- struct sk_buff *ret;
- u_long i=0, tmp;
-
- p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
- if (!p) return NULL;
-
- tmp = virt_to_bus(p->data);
- i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
- skb_reserve(p, i);
- lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
-
- ret = lp->rx_skb[index];
- lp->rx_skb[index] = p;
-
- if ((u_long) ret > 1) {
- skb_put(ret, len);
- }
-
- return ret;
-
-#else
- if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
-
- p = netdev_alloc_skb(dev, len + 2);
- if (!p) return NULL;
-
- skb_reserve(p, 2); /* Align */
- if (index < lp->rx_old) { /* Wrapped buffer */
- short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
- skb_put_data(p, lp->rx_bufs, len - tlen);
- } else { /* Linear buffer */
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
- }
-
- return p;
-#endif
-}
-
-static void
-de4x5_free_rx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->rxRingSize; i++) {
- if ((u_long) lp->rx_skb[i] > 1) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- lp->rx_ring[i].status = 0;
- lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
- }
-}
-
-static void
-de4x5_free_tx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->txRingSize; i++) {
- if (lp->tx_skb[i])
- de4x5_free_tx_buff(lp, i);
- lp->tx_ring[i].status = 0;
- }
-
- /* Unload the locally queued packets */
- __skb_queue_purge(&lp->cache.queue);
-}
-
-/*
-** When a user pulls a connection, the DECchip can end up in a
-** 'running - waiting for end of transmission' state. This means that we
-** have to perform a chip soft reset to ensure that we can synchronize
-** the hardware and software and make any media probes using a loopback
-** packet meaningful.
-*/
-static void
-de4x5_save_skbs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- if (!lp->cache.save_cnt) {
- STOP_DE4X5;
- de4x5_tx(dev); /* Flush any sent skb's */
- de4x5_free_tx_buffs(dev);
- de4x5_cache_state(dev, DE4X5_SAVE_STATE);
- de4x5_sw_reset(dev);
- de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
- lp->cache.save_cnt++;
- START_DE4X5;
- }
-}
-
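-/*
-** Restore the descriptor rings to their initial state after a media
-** probe: rewrite the ring base addresses, reset the ring indices and
-** hand all RX descriptors back to the chip.
-*/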
-static void
-de4x5_rst_desc_ring(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i;
- s32 omr;
-
- if (lp->cache.save_cnt) {
- STOP_DE4X5;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
- lp->cache.save_cnt--;
- START_DE4X5;
- }
-}
-
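-/*
-** Save or restore the bus mode, operating mode and interrupt mask
-** registers (csr0/6/7) around a soft reset; a restore also rewrites
-** the GEP (DC21140) or SIA (DC2114x) settings.
-*/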
-static void
-de4x5_cache_state(struct net_device *dev, int flag)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- switch(flag) {
- case DE4X5_SAVE_STATE:
- lp->cache.csr0 = inl(DE4X5_BMR);
- lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
- lp->cache.csr7 = inl(DE4X5_IMR);
- break;
-
- case DE4X5_RESTORE_STATE:
- outl(lp->cache.csr0, DE4X5_BMR);
- outl(lp->cache.csr6, DE4X5_OMR);
- outl(lp->cache.csr7, DE4X5_IMR);
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
- lp->cache.csr15);
- }
- break;
- }
-}
-
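-/*
-** Queue helpers for the locally cached TX skbs that are held back while
-** a media probe is in progress; de4x5_putb_cache re-queues at the head.
-*/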
-static void
-de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_tail(&lp->cache.queue, skb);
-}
-
-static void
-de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_head(&lp->cache.queue, skb);
-}
-
-static struct sk_buff *
-de4x5_get_cache(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return __skb_dequeue(&lp->cache.queue);
-}
-
-/*
-** Check the Auto Negotiation State. Return OK when a link pass interrupt
-** is received and the auto-negotiation status is NWAY OK.
-*/
-static int
-test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, ans;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
- }
-
- ans = inl(DE4X5_SISR) & SISR_ANS;
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
-static void
-de4x5_setup_intr(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, sts;
-
- if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
- imr = 0;
- UNMASK_IRQs;
- sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
- outl(sts, DE4X5_STS);
- ENABLE_IRQs;
- }
-}
-
-/*
-** Reset the SIA and reload csr13-15, using the cached SROM values when
-** an SROM infoblock is in use.
-*/
-static void
-reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- RESET_SIA;
- if (lp->useSROM) {
- if (lp->ibn == 3) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].gep);
- outl(1, DE4X5_SICR);
- return;
- } else {
- csr15 = lp->cache.csr15;
- csr14 = lp->cache.csr14;
- csr13 = lp->cache.csr13;
- outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
- outl(csr15 | lp->cache.gep, DE4X5_SIGR);
- }
- } else {
- outl(csr15, DE4X5_SIGR);
- }
- outl(csr14, DE4X5_STRR);
- outl(csr13, DE4X5_SICR);
-
- mdelay(10);
-}
-
-/*
-** Create a loopback ethernet packet
-*/
-static void
-create_packet(struct net_device *dev, char *frame, int len)
-{
- int i;
- char *buf = frame;
-
- for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
- *buf++ = dev->dev_addr[i];
- }
- for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
- *buf++ = dev->dev_addr[i];
- }
-
- *buf++ = 0; /* Packet length (2 bytes) */
- *buf++ = 1;
-}
-
-/*
-** Look for a particular board name in the EISA configuration space
-*/
-static int
-EISA_signature(char *name, struct device *device)
-{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
- struct eisa_device *edev;
-
- *name = '\0';
- edev = to_eisa_device (device);
- i = edev->id.driver_data;
-
- if (i >= 0 && i < siglen) {
- strcpy (name, de4x5_signatures[i]);
- status = 1;
- }
-
- return status; /* return the device name string */
-}
-
-/*
-** Look for a particular board name in the SROM for PCI boards
-*/
-static void
-PCI_signature(char *name, struct de4x5_private *lp)
-{
- int i, siglen = ARRAY_SIZE(de4x5_signatures);
-
- if (lp->chipset == DC21040) {
- strcpy(name, "DE434/5");
- return;
- } else { /* Search for a DEC name in the SROM */
- int tmp = *((char *)&lp->srom + 19) * 3;
- strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
- }
- name[8] = '\0';
- for (i=0; i<siglen; i++) {
- if (strstr(name,de4x5_signatures[i])!=NULL) break;
- }
- if (i == siglen) {
- if (dec_only) {
- *name = '\0';
- } else { /* Use chip name to avoid confusion */
- strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
- ((lp->chipset == DC21041) ? "DC21041" :
- ((lp->chipset == DC21140) ? "DC21140" :
- ((lp->chipset == DC21142) ? "DC21142" :
- ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
- )))))));
- }
- if (lp->chipset != DC21041) {
- lp->useSROM = true; /* card is not recognisably DEC */
- }
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- lp->useSROM = true;
- }
-}
-
-/*
-** Set up the Ethernet PROM counter to the start of the Ethernet address on
-** the DC21040, else read the SROM for the other chips.
-** The SROM may not be present in a multi-MAC card, so first read the
-** MAC address and check for a bad address. If there is a bad one then exit
-** immediately with the prior srom contents intact (the h/w address will
-** be fixed up later).
-*/
-static void
-DevicePresent(struct net_device *dev, u_long aprom_addr)
-{
- int i, j=0;
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->chipset == DC21040) {
- if (lp->bus == EISA) {
- enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
- } else {
- outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
- }
- } else { /* Read new srom */
- u_short tmp;
- __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
- for (i=0; i<(ETH_ALEN>>1); i++) {
- tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
- j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
- *p = cpu_to_le16(tmp);
- }
- if (j == 0 || j == 3 * 0xffff) {
- /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
- return;
- }
-
- p = (__le16 *)&lp->srom;
- for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
- tmp = srom_rd(aprom_addr, i);
- *p++ = cpu_to_le16(tmp);
- }
- de4x5_dbg_srom(&lp->srom);
- }
-}
-
-/*
-** Since the write on the Enet PROM register doesn't seem to reset the PROM
-** pointer correctly (at least on my DE425 EISA card), this routine should do
-** it...from depca.c.
-*/
-static void
-enet_addr_rst(u_long aprom_addr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- } dev;
- short sigLength=0;
- s8 data;
- int i, j;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
- data = inb(aprom_addr);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j=1;
- } else {
- j=0;
- }
- }
- }
-}
-
-/*
-** In the bad status case with no SROM, add one to the previous address.
-** However, the increment must be carried backwards in case one or more
-** of the bytes is 0xff. Only the last 3 bytes need be checked since the
-** first three are invariant - they are assigned to an organisation.
-*/
-static int
-get_hw_addr(struct net_device *dev)
-{
- u_long iobase = dev->base_addr;
- int broken, i, k, tmp, status = 0;
- u_short j,chksum;
- struct de4x5_private *lp = netdev_priv(dev);
- u8 addr[ETH_ALEN];
-
- broken = de4x5_bad_srom(lp);
-
- for (i=0,k=0,j=0;j<3;j++) {
- k <<= 1;
- if (k > 0xffff) k-=0xffff;
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_char) tmp;
- addr[i++] = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_short) (tmp << 8);
- addr[i++] = (u_char) tmp;
- } else if (!broken) {
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- } else if ((broken == SMC) || (broken == ACCTON)) {
- addr[i] = *((u_char *)&lp->srom + i); i++;
- addr[i] = *((u_char *)&lp->srom + i); i++;
- }
- } else {
- k += (u_char) (tmp = inb(EISA_APROM));
- addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- addr[i++] = (u_char) tmp;
- }
-
- if (k > 0xffff) k-=0xffff;
- }
- if (k == 0xffff) k=0;
-
- eth_hw_addr_set(dev, addr);
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum |= (u_short) (tmp << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
- } else {
- chksum = (u_char) inb(EISA_APROM);
- chksum |= (u_short) (inb(EISA_APROM) << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
-
- /* If possible, try to fix a broken card - SMC only so far */
- srom_repair(dev, broken);
-
-#ifdef CONFIG_PPC_PMAC
- /*
- ** If the address starts with 00 a0, we have to bit-reverse
- ** each byte of the address.
- */
- if ( machine_is(powermac) &&
- (dev->dev_addr[0] == 0) &&
- (dev->dev_addr[1] == 0xa0) )
- {
- for (i = 0; i < ETH_ALEN; ++i)
- {
- int x = dev->dev_addr[i];
- x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
- x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
- }
- eth_hw_addr_set(dev, addr);
- }
-#endif /* CONFIG_PPC_PMAC */
-
- /* Test for a bad enet address */
- status = test_bad_enet(dev, status);
-
- return status;
-}
-
-/*
-** Test for enet addresses in the first 32 bytes.
-*/
-static int
-de4x5_bad_srom(struct de4x5_private *lp)
-{
- int i, status = 0;
-
- for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!memcmp(&lp->srom, &enet_det[i], 3) &&
- !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
- if (i == 0) {
- status = SMC;
- } else if (i == 1) {
- status = ACCTON;
- }
- break;
- }
- }
-
- return status;
-}
-
-static void
-srom_repair(struct net_device *dev, int card)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- switch(card) {
- case SMC:
- memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
- memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
- memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
- lp->useSROM = true;
- break;
- }
-}
-
-/*
-** Assume that the irq's do not follow the PCI spec - this seems
-** to be true so far (2 for 2).
-*/
-static int
-test_bad_enet(struct net_device *dev, int status)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, tmp;
-
- for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
- if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
- (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- eth_addr_inc(last.addr);
- eth_hw_addr_set(dev, last.addr);
-
- if (!an_exception(lp)) {
- dev->irq = last.irq;
- }
-
- status = 0;
- }
- } else if (!status) {
- last.chipset = lp->chipset;
- last.bus = lp->bus_num;
- last.irq = dev->irq;
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
- }
-
- return status;
-}
-
-/*
-** List of board exceptions with correctly wired IRQs
-*/
-static int
-an_exception(struct de4x5_private *lp)
-{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
- (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
- return -1;
- }
-
- return 0;
-}
-
-/*
-** SROM Read
-*/
-static short
-srom_rd(u_long addr, u_char offset)
-{
- sendto_srom(SROM_RD | SROM_SR, addr);
-
- srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
- srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
- srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
- return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
-}
-
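-/*
-** Pulse the SROM clock line once while holding the given command/data
-** bits.
-*/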
-static void
-srom_latch(u_int command, u_long addr)
-{
- sendto_srom(command, addr);
- sendto_srom(command | DT_CLK, addr);
- sendto_srom(command, addr);
-}
-
-static void
-srom_command(u_int command, u_long addr)
-{
- srom_latch(command, addr);
- srom_latch(command, addr);
- srom_latch((command & 0x0000ff00) | DT_CS, addr);
-}
-
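-/*
-** Clock out the 6-bit SROM word address, MSB first, then do a dummy
-** read of the data-out line.
-*/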
-static void
-srom_address(u_int command, u_long addr, u_char offset)
-{
- int i, a;
-
- a = offset << 2;
- for (i=0; i<6; i++, a <<= 1) {
- srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
- }
- udelay(1);
-
- i = (getfrom_srom(addr) >> 3) & 0x01;
-}
-
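-/*
-** Clock in a 16-bit data word from the SROM, MSB first, then deselect
-** the device.
-*/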
-static short
-srom_data(u_int command, u_long addr)
-{
- int i;
- short word = 0;
- s32 tmp;
-
- for (i=0; i<16; i++) {
- sendto_srom(command | DT_CLK, addr);
- tmp = getfrom_srom(addr);
- sendto_srom(command, addr);
-
- word = (word << 1) | ((tmp >> 3) & 0x01);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-
- return word;
-}
-
-/*
-static void
-srom_busy(u_int command, u_long addr)
-{
- sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
- while (!((getfrom_srom(addr) >> 3) & 0x01)) {
- mdelay(1);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-}
-*/
-
-static void
-sendto_srom(u_int command, u_long addr)
-{
- outl(command, addr);
- udelay(1);
-}
-
-static int
-getfrom_srom(u_long addr)
-{
- s32 tmp;
-
- tmp = inl(addr);
- udelay(1);
-
- return tmp;
-}
-
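-/*
-** Find the infoleaf decoder function for this chipset and the SROM
-** information offset for this particular PCI device.
-*/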
-static int
-srom_infoleaf_info(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, count;
- u_char *p;
-
- /* Find the infoleaf decoder function that matches this chipset */
- for (i=0; i<INFOLEAF_SIZE; i++) {
- if (lp->chipset == infoleaf_array[i].chipset) break;
- }
- if (i == INFOLEAF_SIZE) {
- lp->useSROM = false;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
- dev->name);
- return -ENXIO;
- }
-
- lp->infoleaf_fn = infoleaf_array[i].fn;
-
- /* Find the information offset that this function should use */
- count = *((u_char *)&lp->srom + 19);
- p = (u_char *)&lp->srom + 26;
-
- if (count > 1) {
- for (i=count; i; --i, p+=3) {
- if (lp->device == *p) break;
- }
- if (i == 0) {
- lp->useSROM = false;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
- dev->name, lp->device);
- return -ENXIO;
- }
- }
-
- lp->infoleaf_offset = get_unaligned_le16(p + 1);
-
- return 0;
-}
-
-/*
-** This routine loads any type 1 or 3 MII info into the mii device
-** struct and executes any type 5 code to reset PHY devices for this
-** controller.
-** The info for the MII devices will be valid since the index used
-** will follow the discovery process from MII address 1-31 then 0.
-*/
-static void
-srom_init(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- u_char count;
-
- p+=2;
- if (lp->chipset == DC21140) {
- lp->cache.gepc = (*p++ | GEP_CTRL);
- gep_wr(lp->cache.gepc, dev);
- }
-
- /* Block count */
- count = *p++;
-
- /* Jump the infoblocks to find types */
- for (;count; --count) {
- if (*p < 128) {
- p += COMPACT_LEN;
- } else if (*(p+1) == 5) {
- type5_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 4) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 3) {
- type3_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 2) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 1) {
- type1_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else {
- p += ((*p & BLOCK_LEN) + 1);
- }
- }
-}
-
-/*
-** A generic routine that writes GEP control, data and reset information
-** to the GEP register (21140) or csr15 GEP portion (2114[23]).
-*/
-static void
-srom_exec(struct net_device *dev, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char count = (p ? *p++ : 0);
- u_short *w = (u_short *)p;
-
- if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
-
- if (lp->chipset != DC21140) RESET_SIA;
-
- while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
- *p++ : get_unaligned_le16(w++)), dev);
- mdelay(2); /* 2ms per action */
- }
-
- if (lp->chipset != DC21140) {
- outl(lp->cache.csr14, DE4X5_STRR);
- outl(lp->cache.csr13, DE4X5_SICR);
- }
-}
-
-/*
-** Basically this function is a NOP since it will never be called,
-** unless I implement the DC21041 SROM functions. There's no need
-** since the existing code will be satisfactory for all boards.
-*/
-static int
-dc21041_infoleaf(struct net_device *dev)
-{
- return DE4X5_AUTOSENSE_MS;
-}
-
-static int
-dc21140_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* GEP control */
- lp->cache.gepc = (*p++ | GEP_CTRL);
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21142_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21143_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-/*
-** The compact infoblock is only designed for DC21140[A] chips, so
-** we'll reuse the dc21140m_autoconf function. Non MII media only.
-*/
-static int
-compact_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+COMPACT_LEN) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
- } else {
- return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = COMPACT;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- lp->infoblock_media = (*p++) & COMPACT_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/*
-** This block describes non MII media for the DC21140[A] only.
-*/
-static int
-type0_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 0;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- p+=2;
- lp->infoblock_media = (*p++) & BLOCK0_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/* These functions are under construction! */
-
-static int
-type1_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 1;
- lp->active = *p++;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p);
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 1;
- lp->active = *p;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-static int
-type2_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 2;
- lp->active = 0;
- p += 2;
- lp->infoblock_media = (*p) & MEDIA_CODE;
-
- if ((*p++) & EXT_FIELD) {
- lp->cache.csr13 = get_unaligned_le16(p); p += 2;
- lp->cache.csr14 = get_unaligned_le16(p); p += 2;
- lp->cache.csr15 = get_unaligned_le16(p); p += 2;
- } else {
- lp->cache.csr13 = CSR13;
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- }
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
- lp->infoblock_csr6 = OMR_SIA;
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type3_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 3;
- lp->active = *p++;
- if (MOTO_SROM_BUG) lp->active = 0;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].mci = *p;
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 3;
- lp->active = *p;
- if (MOTO_SROM_BUG) lp->active = 0;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type4_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 4;
- lp->active = 0;
- p+=2;
- lp->infoblock_media = (*p++) & MEDIA_CODE;
- lp->cache.csr13 = CSR13; /* Hard coded defaults */
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-/*
-** This block type provides information for resetting external devices
-** (chips) through the General Purpose Register.
-*/
-static int
-type5_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- /* Must be initializing to run this code */
- if ((lp->state == INITIALISED) || (lp->media == INIT)) {
- p+=2;
- lp->rst = p;
- srom_exec(dev, lp->rst);
- }
-
- return DE4X5_AUTOSENSE_MS;
-}
-
-/*
-** MII Read/Write
-*/
-
-static int
-mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to read */
- mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
- return mii_rdata(ioaddr); /* Read data */
-}
-
-static void
-mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to write */
- mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
- data = mii_swap(data, 16); /* Swap data bit ordering */
- mii_wdata(data, 16, ioaddr); /* Write data */
-}
-
-static int
-mii_rdata(u_long ioaddr)
-{
- int i;
- s32 tmp = 0;
-
- for (i=0; i<16; i++) {
- tmp <<= 1;
- tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
- }
-
- return tmp;
-}
-
-static void
-mii_wdata(int data, int len, u_long ioaddr)
-{
- int i;
-
- for (i=0; i<len; i++) {
- sendto_mii(MII_MWR | MII_WR, data, ioaddr);
- data >>= 1;
- }
-}
-
-static void
-mii_address(u_char addr, u_long ioaddr)
-{
- int i;
-
- addr = mii_swap(addr, 5);
- for (i=0; i<5; i++) {
- sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
- addr >>= 1;
- }
-}
-
-static void
-mii_ta(u_long rw, u_long ioaddr)
-{
- if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
- } else {
- getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
- }
-}
-
-static int
-mii_swap(int data, int len)
-{
- int i, tmp = 0;
-
- for (i=0; i<len; i++) {
- tmp <<= 1;
- tmp |= (data & 1);
- data >>= 1;
- }
-
- return tmp;
-}
-
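-/*
-** Bit-bang a single bit onto the MII management interface: present the
-** data bit on MDIO, then raise MDC to clock it into the PHY.
-*/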
-static void
-sendto_mii(u32 command, int data, u_long ioaddr)
-{
- u32 j;
-
- j = (data & 1) << 17;
- outl(command | j, ioaddr);
- udelay(1);
- outl(command | MII_MDC | j, ioaddr);
- udelay(1);
-}
-
-static int
-getfrom_mii(u32 command, u_long ioaddr)
-{
- outl(command, ioaddr);
- udelay(1);
- outl(command | MII_MDC, ioaddr);
- udelay(1);
-
- return (inl(ioaddr) >> 19) & 1;
-}
-
-/*
-** Here are 3 ways to calculate the OUI from the ID registers.
-*/
-static int
-mii_get_oui(u_char phyaddr, u_long ioaddr)
-{
- int r2;
-
- /* Read r2 and r3 */
- r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- mii_rd(MII_ID1, phyaddr, ioaddr);
-
-/*
-** SEEQ and Cypress way (dead code, kept for reference):
-**
-**	union { u_short reg; u_char breg[2]; } a;
-**	int i, r3, ret = 0;
-**
-**	a.reg = 0;
-**	r3 = ((r3>>10)|(r2<<6))&0x0ff;	(shuffle r2 and r3)
-**	r2 = ((r2>>2)&0x3fff);
-**
-**	for (i=0; i<8; i++) {		(bit reverse r3)
-**	    ret <<= 1; ret |= (r3 & 1); r3 >>= 1;
-**	}
-**	for (i=0; i<16; i++) {		(bit reverse r2)
-**	    a.reg <<= 1; a.reg |= (r2 & 1); r2 >>= 1;
-**	}
-**	i = a.breg[0];			(swap r2 bytes)
-**	a.breg[0] = a.breg[1];
-**	a.breg[1] = i;
-**
-**	return (a.reg<<8)|ret;
-**
-** NATIONAL and BROADCOM way:
-**	return (r2<<6)|(u_int)(r3>>10);
-*/
-
- return r2; /* (I did it) My way */
-}
-
-/*
-** The SROM spec forces us to search addresses [1-31 0]. Bummer.
-*/
-static int
-mii_get_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, k, n, limit=ARRAY_SIZE(phy_info);
- int id;
-
- lp->active = 0;
- lp->useMII = true;
-
- /* Search the MII address space for possible PHY devices */
- for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
- lp->phy[lp->active].addr = i;
- if (i==0) n++; /* Count cycles */
- while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
- if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
- for (j=0; j<limit; j++) { /* Search PHY table */
- if (id != phy_info[j].id) continue; /* ID match? */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- memcpy((char *)&lp->phy[k],
- (char *)&phy_info[j], sizeof(struct phy_table));
- lp->phy[k].addr = i;
- lp->mii_cnt++;
- lp->active++;
- } else {
- goto purgatory; /* Stop the search */
- }
- break;
- }
- if ((j == limit) && (i < DE4X5_MAX_MII)) {
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- lp->phy[k].addr = i;
- lp->phy[k].id = id;
- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
- lp->mii_cnt++;
- lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
- j = de4x5_debug;
- de4x5_debug |= DEBUG_MII;
- de4x5_dbg_mii(dev, k);
- de4x5_debug = j;
- printk("\n");
- }
- }
- purgatory:
- lp->active = 0;
- if (lp->phy[0].id) { /* Reset the PHY devices */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
- mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
- while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
- de4x5_dbg_mii(dev, k);
- }
- }
- if (!lp->mii_cnt) lp->useMII = false;
-
- return lp->mii_cnt;
-}
-
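-/*
-** Build the setup frame holding the host (and broadcast) Ethernet
-** addresses, in either hash perfect or perfect filtering format.
-*/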
-static char *
-build_setup_frame(struct net_device *dev, int mode)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
- char *pa = lp->setup_frame;
-
- /* Initialise the setup frame */
- if (mode == ALL) {
- memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
- }
-
- if (lp->setup_f == HASH_PERF) {
- for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
- *(pa + i) = dev->dev_addr[i]; /* Host address */
- if (i & 0x01) pa += 2;
- }
- *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
- } else {
- for (i=0; i<ETH_ALEN; i++) { /* Host address */
- *(pa + (i&1)) = dev->dev_addr[i];
- if (i & 0x01) pa += 4;
- }
- for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
- *(pa + (i&1)) = (char) 0xff;
- if (i & 0x01) pa += 4;
- }
- }
-
- return pa; /* Points to the next entry */
-}
-
-static void
-disable_ast(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- del_timer_sync(&lp->timer);
-}
-
-static long
-de4x5_switch_mac_port(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- STOP_DE4X5;
-
- /* Assert the OMR_PS bit in CSR6 */
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
- OMR_FDX));
- omr |= lp->infoblock_csr6;
- if (omr & OMR_PS) omr |= OMR_HBD;
- outl(omr, DE4X5_OMR);
-
- /* Soft Reset */
- RESET_DE4X5;
-
- /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else if ((lp->chipset & ~0x0ff) == DC2114x) {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
- }
-
- /* Restore CSR6 */
- outl(omr, DE4X5_OMR);
-
- /* Reset CSR8 */
- inl(DE4X5_MFC);
-
- return omr;
-}
-
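-/*
-** General Purpose Port access: direct via the GEP register on the
-** DC21140, or through the GEP portion of csr15 (SIGR) on the DC2114x.
-*/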
-static void
-gep_wr(s32 data, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- outl(data, DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
- }
-}
-
-static int
-gep_rd(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- return inl(DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return inl(DE4X5_SIGR) & 0x000fffff;
- }
-
- return 0;
-}
-
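-/*
-** Power management: put the chip into wakeup, snooze or sleep state
-** through EISA I/O or PCI configuration space. The DC21040 and DC21140
-** have no power management support.
-*/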
-static void
-yawn(struct net_device *dev, int state)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
-
- if(lp->bus == EISA) {
- switch(state) {
- case WAKEUP:
- outb(WAKEUP, PCI_CFPM);
- mdelay(10);
- break;
-
- case SNOOZE:
- outb(SNOOZE, PCI_CFPM);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- outb(SLEEP, PCI_CFPM);
- break;
- }
- } else {
- struct pci_dev *pdev = to_pci_dev (lp->gendev);
- switch(state) {
- case WAKEUP:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- mdelay(10);
- break;
-
- case SNOOZE:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
- break;
- }
- }
-}
-
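-/*
-** Parse the command line/module 'args' string for this device name and
-** pick out any full duplex and autosense media options.
-*/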
-static void
-de4x5_parse_params(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- char *p, *q, t;
-
- lp->params.fdx = false;
- lp->params.autosense = AUTO;
-
- if (args == NULL) return;
-
- if ((p = strstr(args, dev->name))) {
- if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
- t = *q;
- *q = '\0';
-
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
-
- if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP_NW")) {
- lp->params.autosense = TP_NW;
- } else if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "BNC")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "AUI")) {
- lp->params.autosense = AUI;
- } else if (strstr(p, "10Mb")) {
- lp->params.autosense = _10Mb;
- } else if (strstr(p, "100Mb")) {
- lp->params.autosense = _100Mb;
- } else if (strstr(p, "AUTO")) {
- lp->params.autosense = AUTO;
- }
- }
- *q = t;
- }
-}
-
-static void
-de4x5_dbg_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
- printk("\tphysical address: %pM\n", dev->dev_addr);
- printk("Descriptor head addresses:\n");
- printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
- printk("Descriptor addresses:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
- printk("Descriptor buffers:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size:\nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
- }
-}
-
-static void
-de4x5_dbg_mii(struct net_device *dev, int k)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (de4x5_debug & DEBUG_MII) {
- printk("\nMII device address: %d\n", lp->phy[k].addr);
- printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
- printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
- printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
- }
- printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
- printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
- } else {
- printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
- }
- }
-}
-
-static void
-de4x5_dbg_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->media != lp->c_media) {
- if (de4x5_debug & DEBUG_MEDIA) {
- printk("%s: media is %s%s\n", dev->name,
- (lp->media == NC ? "unconnected, link down or incompatible connection" :
- (lp->media == TP ? "TP" :
- (lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
- (lp->media == _100Mb ? "100Mb/s" :
- (lp->media == _10Mb ? "10Mb/s" :
- "???"
- ))))))))), (lp->fdx?" full duplex.":"."));
- }
- lp->c_media = lp->media;
- }
-}
-
-static void
-de4x5_dbg_srom(struct de4x5_srom *p)
-{
- int i;
-
- if (de4x5_debug & DEBUG_SROM) {
- printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
- printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
- printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
- printk("SROM version: %02x\n", (u_char)(p->version));
- printk("# controllers: %02x\n", (u_char)(p->num_controllers));
-
- printk("Hardware Address: %pM\n", p->ieee_addr);
- printk("CRC checksum: %04x\n", (u_short)(p->chksum));
- for (i=0; i<64; i++) {
- printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
- }
- }
-}
-
-static void
-de4x5_dbg_rx(struct sk_buff *skb, int len)
-{
- int i, j;
-
- if (de4x5_debug & DEBUG_RX) {
- printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
- skb->data, &skb->data[6],
- (u_char)skb->data[12],
- (u_char)skb->data[13],
- len);
- for (j=0; len>0;j+=16, len-=16) {
- printk(" %03x: ",j);
- for (i=0; i<16 && i<len; i++) {
- printk("%02x ",(u_char)skb->data[i+j]);
- }
- printk("\n");
- }
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and
-** the CAP_NET_ADMIN capability is checked in those cases. In the normal
-** course of events this function is only used for my testing.
-*/
-static int
-de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 omr;
- union {
- u8 addr[144];
- u16 sval[72];
- u32 lval[36];
- } tmp;
- u_long flags = 0;
-
- if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
- return -EOPNOTSUPP;
-
- switch(ioc->cmd) {
- case DE4X5_GET_HWADDR: /* Get the hardware address */
- ioc->len = ETH_ALEN;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
- case DE4X5_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
- if (netif_queue_stopped(dev))
- return -EBUSY;
- netif_stop_queue(dev);
- eth_hw_addr_set(dev, tmp.addr);
- build_setup_frame(dev, PHYS_ADDR_ONLY);
- /* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_wake_queue(dev); /* Unlock the TX ring */
- break;
-
- case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PM;
- outl(omr, DE4X5_OMR);
- break;
-
- case DE4X5_GET_STATS: /* Get the driver statistics */
- {
- struct pkt_stats statbuf;
- ioc->len = sizeof(statbuf);
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(&statbuf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
- break;
- }
- case DE4X5_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DE4X5_GET_OMR: /* Get the OMR Register contents */
- tmp.addr[0] = inl(DE4X5_OMR);
- if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
- break;
-
- case DE4X5_SET_OMR: /* Set the OMR Register contents */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
- outl(tmp.addr[0], DE4X5_OMR);
- break;
-
- case DE4X5_GET_REG: /* Get the DE4X5 Registers */
- j = 0;
- tmp.lval[0] = inl(DE4X5_STS); j+=4;
- tmp.lval[1] = inl(DE4X5_BMR); j+=4;
- tmp.lval[2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[3] = inl(DE4X5_OMR); j+=4;
- tmp.lval[4] = inl(DE4X5_SISR); j+=4;
- tmp.lval[5] = inl(DE4X5_SICR); j+=4;
- tmp.lval[6] = inl(DE4X5_STRR); j+=4;
- tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.lval, ioc->len))
- return -EFAULT;
- break;
-
-#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
- case DE4X5_DUMP:
- j = 0;
- tmp.addr[j++] = dev->irq;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[j++] = dev->dev_addr[i];
- }
- tmp.addr[j++] = lp->rxRingSize;
- tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
- tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
- for (i=0;i<lp->rxRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
- }
- for (i=0;i<lp->txRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
- }
-
- tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
- if (lp->chipset == DC21140) {
- tmp.lval[j>>2] = gep_rd(dev); j+=4;
- } else {
- tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
- }
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
- tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- } else {
- tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- }
-
- tmp.addr[j++] = lp->txRingSize;
- tmp.addr[j++] = netif_queue_stopped(dev);
-
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
-*/
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init de4x5_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_PCI
- err = pci_register_driver(&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- err |= eisa_driver_register (&de4x5_eisa_driver);
-#endif
-
- return err;
-}
-
-static void __exit de4x5_module_exit (void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver (&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&de4x5_eisa_driver);
-#endif
-}
-
-module_init (de4x5_module_init);
-module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
deleted file mode 100644
index 1bfdc9b117f6..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** DC21040 CSR<1..15> Register Address Map
-*/
-#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
-#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
-#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
-#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
-#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
-#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
-#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
-#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
-#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
-#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
-#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
-#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
-#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
-#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
-#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
-#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
-#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
-#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
-#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
-#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
-#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
-
-/*
-** EISA Register Address Map
-*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase+0x0c84 /* EISA Control Register */
-#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
-#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
-#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
-#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
-#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
-
-/*
-** PCI/EISA Configuration Registers Address Map
-*/
-#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
-#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
-#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
-#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
-#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
-#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
-#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
-#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
-#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
-#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
-#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
-
-/*
-** EISA Configuration Register 0 bit definitions
-*/
-#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
-#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
-#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
-#define ER0_ISTS 0x10 /* Interrupt Status (X) */
-#define ER0_LI 0x08 /* Latch Interrupts */
-#define ER0_INTL 0x06 /* INTerrupt Level */
-#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
-
-/*
-** EISA Configuration Register 1 bit definitions
-*/
-#define ER1_IAM 0xe0 /* ISA Address Mode */
-#define ER1_IAE 0x10 /* ISA Addressing Enable */
-#define ER1_UPIN 0x0f /* User Pins */
-
-/*
-** EISA Configuration Register 2 bit definitions
-*/
-#define ER2_BRS 0xc0 /* Boot ROM Size */
-#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
-
-/*
-** EISA Configuration Register 3 bit definitions
-*/
-#define ER3_BWE 0x40 /* Burst Write Enable */
-#define ER3_BRE 0x04 /* Burst Read Enable */
-#define ER3_LSR 0x02 /* Local Software Reset */
-
-/*
-** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
-** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
-** the configuration revision register step number.
-*/
-#define CFID_DID 0xff00 /* Device ID */
-#define CFID_VID 0x00ff /* Vendor ID */
-#define DC21040_DID 0x0200 /* Unique Device ID # */
-#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
-#define DC21041_DID 0x1400 /* Unique Device ID # */
-#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
-#define DC21140_DID 0x0900 /* Unique Device ID # */
-#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
-#define DC2114x_DID 0x1900 /* Unique Device ID # */
-#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
-
-/*
-** Chipset defines
-*/
-#define DC21040 DC21040_DID
-#define DC21041 DC21041_DID
-#define DC21140 DC21140_DID
-#define DC2114x DC2114x_DID
-#define DC21142 (DC2114x_DID | 0x0010)
-#define DC21143 (DC2114x_DID | 0x0030)
-#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
-
-#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
-#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
-#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
-#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
-#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
-#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
-
-/*
-** PCI Configuration Command/Status Register (PCI_CFCS)
-*/
-#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
-#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
-#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
-#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
-#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
-#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
-#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
-#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
-#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
-#define CFCS_MO 0x00000004 /* Master Operation (C) */
-#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
-#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
-
-/*
-** PCI Configuration Revision Register (PCI_CFRV)
-*/
-#define CFRV_BC 0xff000000 /* Base Class */
-#define CFRV_SC 0x00ff0000 /* Subclass */
-#define CFRV_RN 0x000000f0 /* Revision Number */
-#define CFRV_SN 0x0000000f /* Step Number */
-#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
-#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
-#define STEP_NUMBER 0x00000020 /* Increments for future chips */
-#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
-#define CFRV_MASK 0xffff0000 /* Register mask */
-
-/*
-** PCI Configuration Latency Timer Register (PCI_CFLT)
-*/
-#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
-
-/*
-** PCI Configuration Base I/O Address Register (PCI_CBIO)
-*/
-#define CBIO_MASK -128 /* Base I/O Address Mask */
-#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
-
-/*
-** PCI Configuration Card Information Structure Register (PCI_CCIS)
-*/
-#define CCIS_ROMI 0xf0000000 /* ROM Image */
-#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
-#define CCIS_ASI 0x00000007 /* Address Space Indicator */
-
-/*
-** PCI Configuration Subsystem ID Register (PCI_SSID)
-*/
-#define SSID_SSID 0xffff0000 /* Subsystem ID */
-#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
-
-/*
-** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
-*/
-#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
-#define CBER_ROME 0x00000001 /* ROM Enable */
-
-/*
-** PCI Configuration Interrupt Register (PCI_CFIT)
-*/
-#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
-#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
-#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
-#define CFIT_IRQL 0x000000ff /* Interrupt Line */
-
-/*
-** PCI Configuration Power Management Area Register (PCI_CFPM)
-*/
-#define SLEEP 0x80 /* Power Saving Sleep Mode */
-#define SNOOZE 0x40 /* Power Saving Snooze Mode */
-#define WAKEUP 0x00 /* Power Saving Wakeup */
-
-#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
-#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
-
-/*
-** DC21040 Bus Mode Register (DE4X5_BMR)
-*/
-#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
-#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
-#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
-#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
-#define BMR_CAL 0x0000c000 /* Cache Alignment */
-#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
-#define BMR_BLE 0x00000080 /* Big/Little Endian */
-#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
-#define BMR_BAR 0x00000002 /* Bus ARbitration */
-#define BMR_SWR 0x00000001 /* Software Reset */
-
- /* Timings here are for 10BASE-T/AUI only*/
-#define TAP_NOPOLL 0x00000000 /* No automatic polling */
-#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
-#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
-#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
-#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
-#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
-#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
-#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
-
-#define CAL_NOUSE 0x00000000 /* Not used */
-#define CAL_8LONG 0x00004000 /* 8-longword alignment */
-#define CAL_16LONG 0x00008000 /* 16-longword alignment */
-#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
-
-#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
-#define PBL_1 0x00000100 /* 1 longword DMA burst length */
-#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
-#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
-#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
-#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
-#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
-
-#define DSL_0 0x00000000 /* 0 longword / descriptor */
-#define DSL_1 0x00000004 /* 1 longword / descriptor */
-#define DSL_2 0x00000008 /* 2 longwords / descriptor */
-#define DSL_4 0x00000010 /* 4 longwords / descriptor */
-#define DSL_8 0x00000020 /* 8 longwords / descriptor */
-#define DSL_16 0x00000040 /* 16 longwords / descriptor */
-#define DSL_32 0x00000080 /* 32 longwords / descriptor */
-
-/*
-** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
-*/
-#define TPD 0x00000001 /* Transmit Poll Demand */
-
-/*
-** DC21040 Receive Poll Demand Register (DE4X5_RPD)
-*/
-#define RPD 0x00000001 /* Receive Poll Demand */
-
-/*
-** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
-*/
-#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
-
-/*
-** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
-*/
-#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
-
-/*
-** Status Register (DE4X5_STS)
-*/
-#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
-#define STS_BE 0x03800000 /* Bus Error Bits */
-#define STS_TS 0x00700000 /* Transmit Process State */
-#define STS_RS 0x000e0000 /* Receive Process State */
-#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define STS_ER 0x00004000 /* Early Receive */
-#define STS_FBE 0x00002000 /* Fatal Bus Error */
-#define STS_SE 0x00002000 /* System Error */
-#define STS_LNF 0x00001000 /* Link Fail */
-#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
-#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
-#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define STS_AT 0x00000400 /* AUI/TP Pin */
-#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
-#define STS_RPS 0x00000100 /* Receive Process Stopped */
-#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define STS_RI 0x00000040 /* Receive Interrupt */
-#define STS_UNF 0x00000020 /* Transmit Underflow */
-#define STS_LNP 0x00000010 /* Link Pass */
-#define STS_ANC 0x00000010 /* Autonegotiation Complete */
-#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
-#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define STS_TPS 0x00000002 /* Transmit Process Stopped */
-#define STS_TI 0x00000001 /* Transmit Interrupt */
-
-#define EB_PAR 0x00000000 /* Parity Error */
-#define EB_MA 0x00800000 /* Master Abort */
-#define EB_TA 0x01000000 /* Target Abort */
-#define EB_RES0 0x01800000 /* Reserved */
-#define EB_RES1 0x02000000 /* Reserved */
-
-#define TS_STOP 0x00000000 /* Stopped */
-#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
-#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
-#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
-#define TS_RES 0x00400000 /* Reserved */
-#define TS_SPKT 0x00500000 /* Setup Packet */
-#define TS_SUSP 0x00600000 /* Suspended */
-#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
-
-#define RS_STOP 0x00000000 /* Stopped */
-#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
-#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
-#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
-#define RS_SUSP 0x00080000 /* Suspended */
-#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
-#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
-#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
-
-#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
-
-/*
-** Operation Mode Register (DE4X5_OMR)
-*/
-#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
-#define OMR_RA 0x40000000 /* Receive All */
-#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
-#define OMR_SCR 0x01000000 /* Scrambler Mode */
-#define OMR_PCS 0x00800000 /* PCS Function */
-#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
-#define OMR_SF 0x00200000 /* Store and Forward */
-#define OMR_HBD 0x00080000 /* HeartBeat Disable */
-#define OMR_PS 0x00040000 /* Port Select */
-#define OMR_CA 0x00020000 /* Capture Effect Enable */
-#define OMR_BP 0x00010000 /* Back Pressure */
-#define OMR_TR 0x0000c000 /* Threshold Control Bits */
-#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
-#define OMR_FC 0x00001000 /* Force Collision Mode */
-#define OMR_OM 0x00000c00 /* Operating Mode */
-#define OMR_FDX 0x00000200 /* Full Duplex Mode */
-#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
-#define OMR_PM 0x00000080 /* Pass All Multicast */
-#define OMR_PR 0x00000040 /* Promiscuous Mode */
-#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
-#define OMR_IF 0x00000010 /* Inverse Filtering */
-#define OMR_PB 0x00000008 /* Pass Bad Frames */
-#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
-#define OMR_SR 0x00000002 /* Start/Stop Receive */
-#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
-
-#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
-#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
-#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
-#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
-
-#define OMR_DEF (OMR_SDP)
-#define OMR_SIA (OMR_SDP | OMR_TTM)
-#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
-#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
-#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
-
-/*
-** DC21040 Interrupt Mask Register (DE4X5_IMR)
-*/
-#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
-#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
-#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
-#define IMR_ERM 0x00004000 /* Early Receive Mask */
-#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
-#define IMR_SEM 0x00002000 /* System Error Mask */
-#define IMR_LFM 0x00001000 /* Link Fail Mask */
-#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
-#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
-#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
-#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
-#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
-#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
-#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
-#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
-#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
-#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
-#define IMR_LPM 0x00000010 /* Link Pass */
-#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
-#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
-#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
-#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
-
-/*
-** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
-*/
-#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
-#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
-#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
-#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
-#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
-
-/*
-** DC21040 Ethernet Address PROM (DE4X5_APROM)
-*/
-#define APROM_DN 0x80000000 /* Data Not Valid */
-#define APROM_DT 0x000000ff /* Address Byte */
-
-/*
-** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
-*/
-#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define BROM_RD 0x00004000 /* Read from Boot ROM */
-#define BROM_WR 0x00002000 /* Write to Boot ROM */
-#define BROM_BR 0x00001000 /* Select Boot ROM when set */
-#define BROM_SR 0x00000800 /* Select Serial ROM when set */
-#define BROM_REG 0x00000400 /* External Register Select */
-#define BROM_DT 0x000000ff /* Data Byte */
-
-/*
-** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
-*/
-#define MII_MDI 0x00080000 /* MII Management Data In */
-#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
-#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
-#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
-#define MII_MDT 0x00020000 /* MII Management Data Out */
-#define MII_MDC 0x00010000 /* MII Management Clock */
-#define MII_RD 0x00004000 /* Read from MII */
-#define MII_WR 0x00002000 /* Write to MII */
-#define MII_SEL 0x00000800 /* Select MII when RESET */
-
-#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define SROM_RD 0x00004000 /* Read from Boot ROM */
-#define SROM_WR 0x00002000 /* Write to Boot ROM */
-#define SROM_BR 0x00001000 /* Select Boot ROM when set */
-#define SROM_SR 0x00000800 /* Select Serial ROM when set */
-#define SROM_REG 0x00000400 /* External Register Select */
-#define SROM_DT 0x000000ff /* Data Byte */
-
-#define DT_OUT 0x00000008 /* Serial Data Out */
-#define DT_IN 0x00000004 /* Serial Data In */
-#define DT_CLK 0x00000002 /* Serial ROM Clock */
-#define DT_CS 0x00000001 /* Serial ROM Chip Select */
-
-#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
-#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
-#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
-#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
-
-#define MII_CR 0x00 /* MII Management Control Register */
-#define MII_SR 0x01 /* MII Management Status Register */
-#define MII_ID0 0x02 /* PHY Identifier Register 0 */
-#define MII_ID1 0x03 /* PHY Identifier Register 1 */
-#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
-#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
-#define MII_ANE 0x06 /* Auto Negotiation Expansion */
-#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
-
-#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
-
-/*
-** MII Management Control Register
-*/
-#define MII_CR_RST 0x8000 /* RESET the PHY chip */
-#define MII_CR_LPBK 0x4000 /* Loopback enable */
-#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
-#define MII_CR_10 0x0000 /* Set 10Mb/s */
-#define MII_CR_100 0x2000 /* Set 100Mb/s */
-#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
-#define MII_CR_PD 0x0800 /* Power Down */
-#define MII_CR_ISOL 0x0400 /* Isolate Mode */
-#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
-#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
-#define MII_CR_CTE 0x0080 /* Collision Test Enable */
-
-/*
-** MII Management Status Register
-*/
-#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
-#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
-#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
-#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
-#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
-#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
-#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
-#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
-#define MII_SR_LKS 0x0004 /* Link Status */
-#define MII_SR_JABD 0x0002 /* Jabber Detect */
-#define MII_SR_XC 0x0001 /* Extended Capabilities */
-
-/*
-** MII Management Auto Negotiation Advertisement Register
-*/
-#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** MII Management Auto Negotiation Remote End Register
-*/
-#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
-#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
-#define MII_ANLPA_RF 0x2000 /* Remote Fault */
-#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** SROM Media Definitions (ABG SROM Section)
-*/
-#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
-#define MEDIA_MII 0x0040 /* MII Present on the adapter */
-#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
-#define MEDIA_AUI 0x0004 /* AUI Media present */
-#define MEDIA_TP 0x0002 /* TP Media present */
-#define MEDIA_BNC 0x0001 /* BNC Media present */
-
-/*
-** SROM Definitions (Digital Semiconductor Format)
-*/
-#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
-#define SROM_SSID 0x0002 /* Sub-system ID offset */
-#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
-#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
-#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
-#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
-#define SROM_SFV 0x0012 /* SROM Format Version offset */
-#define SROM_CCNT 0x0013 /* Controller Count offset */
-#define SROM_HWADD 0x0014 /* Hardware Address offset */
-#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
-#define SROM_CRC 0x007e /* SROM CRC offset */
-
-/*
-** SROM Media Connection Definitions
-*/
-#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
-#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
-#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
-#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
-#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
-#define SROM_100BT4 0x0006 /* 100BASE-T4 */
-#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
-#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
-#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
-#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
-#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
-#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
-#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
-#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
-#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
-#define SROM_PAO 0x8800 /* Powerup Autosense Only */
-#define SROM_NSMI 0xffff /* No Selected Media Information */
-
-/*
-** SROM Media Definitions
-*/
-#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
-#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
-#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
-#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
-#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
-#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
-
-#define BLOCK_LEN 0x7f /* Extended blocks length mask */
-#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
-#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
-
-/*
-** SROM Compact Format Block Masks
-*/
-#define COMPACT_FI 0x80 /* Format Indicator */
-#define COMPACT_LEN 0x04 /* Length */
-#define COMPACT_MC 0x3f /* Media Code */
-
-/*
-** SROM Extended Format Block Type 0 Masks
-*/
-#define BLOCK0_FI 0x80 /* Format Indicator */
-#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
-#define BLOCK0_MC 0x3f /* Media Code */
-
-/*
-** DC21040 Full Duplex Register (DE4X5_FDR)
-*/
-#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
-
-/*
-** DC21041 General Purpose Timer Register (DE4X5_GPT)
-*/
-#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
-#define GPT_VAL 0x0000ffff /* Timer Value */
-
-/*
-** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
-*/
-/* Valid ONLY for DE500 hardware */
-#define GEP_LNP 0x00000080 /* Link Pass (input) */
-#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
-#define GEP_SDET 0x00000020 /* Signal Detect (input) */
-#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
-#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
-#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
-#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
-#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
-#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
-#define GEP_CTRL 0x00000100 /* GEP control bit */
-
-/*
-** SIA Register Defaults
-*/
-#define CSR13 0x00000001
-#define CSR14 0x0003ff7f /* Autonegotiation disabled */
-#define CSR15 0x00000008
-
-/*
-** SIA Status Register (DE4X5_SISR)
-*/
-#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
-#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
-#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
-#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
-#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
-#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
-#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
-#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
-#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
-#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
-#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
-#define SISR_DAO 0x00000080 /* PLL All One */
-#define SISR_DAZ 0x00000040 /* PLL All Zero */
-#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
-#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
-#define SISR_APS 0x00000008 /* Auto Polarity State */
-#define SISR_LKF 0x00000004 /* Link Fail Status */
-#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
-#define SISR_NCR 0x00000002 /* Network Connection Error */
-#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
-#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
-#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
-
-#define ANS_NDIS 0x00000000 /* Nway disable */
-#define ANS_TDIS 0x00001000 /* Transmit Disable */
-#define ANS_ADET 0x00002000 /* Ability Detect */
-#define ANS_ACK 0x00003000 /* Acknowledge */
-#define ANS_CACK 0x00004000 /* Complete Acknowledge */
-#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
-#define ANS_LCHK 0x00006000 /* Link Check */
-
-#define SISR_RST 0x00000301 /* CSR12 reset */
-#define SISR_ANR 0x00001301 /* Autonegotiation restart */
-
-/*
-** SIA Connectivity Register (DE4X5_SICR)
-*/
-#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
-#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
-#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
-#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
-#define SICR_IE 0x00001000 /* Input Enable */
-#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
-#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
-#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
-#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
-#define SICR_ASE 0x00000080 /* APLL Start Enable*/
-#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
-#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
-#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
-#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
-#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
-#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
-#define SICR_SRL 0x00000001 /* SIA Reset */
-#define SIA_RESET 0x00000000 /* SIA Reset Value */
-
-/*
-** SIA Transmit and Receive Register (DE4X5_STRR)
-*/
-#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
-#define STRR_SPP 0x00004000 /* Set Polarity Plus */
-#define STRR_APE 0x00002000 /* Auto Polarity Enable */
-#define STRR_LTE 0x00001000 /* Link Test Enable */
-#define STRR_SQE 0x00000800 /* Signal Quality Enable */
-#define STRR_CLD 0x00000400 /* Collision Detect Enable */
-#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
-#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
-#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
-#define STRR_HDE 0x00000040 /* Half Duplex Enable */
-#define STRR_CPEN 0x00000030 /* Compensation Enable */
-#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
-#define STRR_DREN 0x00000004 /* Driver Enable */
-#define STRR_LBK 0x00000002 /* Loopback Enable */
-#define STRR_ECEN 0x00000001 /* Encoder Enable */
-#define STRR_RESET 0xffffffff /* Reset value for STRR */
-
-/*
-** SIA General Register (DE4X5_SIGR)
-*/
-#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
-#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
-#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
-#define SIGR_CWE 0x08000000 /* Control Write Enable */
-#define SIGR_RME 0x04000000 /* Receive Match Enable */
-#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
-#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
-#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
-#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
-#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
-#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
-#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
-#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
-#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
-#define SIGR_FRL 0x00002000 /* Force Receiver Low */
-#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
-#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
-#define SIGR_FLF 0x00000400 /* Force Link Fail */
-#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
-#define SIGR_TSCK 0x00000100 /* Test Clock */
-#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
-#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
-#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
-#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
-#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
-#define SIGR_JCK 0x00000004 /* Jabber Clock */
-#define SIGR_HUJ 0x00000002 /* Host Unjab */
-#define SIGR_JBD 0x00000001 /* Jabber Disable */
-#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
-
-/*
-** Receive Descriptor Bit Summary
-*/
-#define R_OWN 0x80000000 /* Own Bit */
-#define RD_FF 0x40000000 /* Filtering Fail */
-#define RD_FL 0x3fff0000 /* Frame Length */
-#define RD_ES 0x00008000 /* Error Summary */
-#define RD_LE 0x00004000 /* Length Error */
-#define RD_DT 0x00003000 /* Data Type */
-#define RD_RF 0x00000800 /* Runt Frame */
-#define RD_MF 0x00000400 /* Multicast Frame */
-#define RD_FS 0x00000200 /* First Descriptor */
-#define RD_LS 0x00000100 /* Last Descriptor */
-#define RD_TL 0x00000080 /* Frame Too Long */
-#define RD_CS 0x00000040 /* Collision Seen */
-#define RD_FT 0x00000020 /* Frame Type */
-#define RD_RJ 0x00000010 /* Receive Watchdog */
-#define RD_RE 0x00000008 /* Report on MII Error */
-#define RD_DB 0x00000004 /* Dribbling Bit */
-#define RD_CE 0x00000002 /* CRC Error */
-#define RD_OF 0x00000001 /* Overflow */
-
-#define RD_RER 0x02000000 /* Receive End Of Ring */
-#define RD_RCH 0x01000000 /* Second Address Chained */
-#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
-#define RD_RBS1 0x000007ff /* Buffer 1 Size */
-
-/*
-** Transmit Descriptor Bit Summary
-*/
-#define T_OWN 0x80000000 /* Own Bit */
-#define TD_ES 0x00008000 /* Error Summary */
-#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
-#define TD_LO 0x00000800 /* Loss Of Carrier */
-#define TD_NC 0x00000400 /* No Carrier */
-#define TD_LC 0x00000200 /* Late Collision */
-#define TD_EC 0x00000100 /* Excessive Collisions */
-#define TD_HF 0x00000080 /* Heartbeat Fail */
-#define TD_CC 0x00000078 /* Collision Counter */
-#define TD_LF 0x00000004 /* Link Fail */
-#define TD_UF 0x00000002 /* Underflow Error */
-#define TD_DE 0x00000001 /* Deferred */
-
-#define TD_IC 0x80000000 /* Interrupt On Completion */
-#define TD_LS 0x40000000 /* Last Segment */
-#define TD_FS 0x20000000 /* First Segment */
-#define TD_FT1 0x10000000 /* Filtering Type */
-#define TD_SET 0x08000000 /* Setup Packet */
-#define TD_AC 0x04000000 /* Add CRC Disable */
-#define TD_TER 0x02000000 /* Transmit End Of Ring */
-#define TD_TCH 0x01000000 /* Second Address Chained */
-#define TD_DPD 0x00800000 /* Disabled Padding */
-#define TD_FT0 0x00400000 /* Filtering Type */
-#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
-#define TD_TBS1 0x000007ff /* Buffer 1 Size */
-
-#define PERFECT_F 0x00000000
-#define HASH_F TD_FT0
-#define INVERSE_F TD_FT1
-#define HASH_O_F   (TD_FT1 | TD_FT0)
-
-/*
-** Media / mode state machine definitions
-** User selectable:
-*/
-#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
-#define TP_NW 0x0002 /* 10Base-T with Nway */
-#define BNC 0x0004 /* Thinwire */
-#define AUI 0x0008 /* Thickwire */
-#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
-#define _10Mb 0x0040 /* 10Mb/s Ethernet */
-#define _100Mb 0x0080 /* 100Mb/s Ethernet */
-#define AUTO 0x4000 /* Auto sense the media or speed */
-
-/*
-** Internal states
-*/
-#define NC 0x0000 /* No Connection */
-#define ANS 0x0020 /* Intermediate AutoNegotiation State */
-#define SPD_DET 0x0100 /* Parallel speed detection */
-#define INIT 0x0200 /* Initial state */
-#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
-#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
-#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
-#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
-#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
-#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
-#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
-#define MII 0x1000 /* MII on the 21143 */
-
-#define TIMER_CB 0x80000000 /* Timer callback detection */
-
-/*
-** DE4X5 DEBUG Options
-*/
-#define DEBUG_NONE 0x0000 /* No DEBUG messages */
-#define DEBUG_VERSION 0x0001 /* Print version message */
-#define DEBUG_MEDIA 0x0002 /* Print media messages */
-#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
-#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
-#define DEBUG_SROM 0x0010 /* Print SROM messages */
-#define DEBUG_MII 0x0020 /* Print MII messages */
-#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
-#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
-#define DEBUG_PCICFG 0x0100
-#define DEBUG_ALL 0x01ff
-
-/*
-** Miscellaneous
-*/
-#define PCI 0
-#define EISA 1
-
-#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
-#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
-
-#define SETUP_FRAME_LEN 192 /* Bytes */
-#define IMPERF_PA_OFFSET 156 /* Bytes */
-
-#define POLL_DEMAND 1
-
-#define LOST_MEDIA_THRESHOLD 3
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define DE4X5_STRLEN 8
-
-#define DE4X5_INIT 0 /* Initialisation time */
-#define DE4X5_RUN 1 /* Run time */
-
-#define DE4X5_SAVE_STATE 0
-#define DE4X5_RESTORE_STATE 1
-
-/*
-** Address Filtering Modes
-*/
-#define PERFECT 0 /* 16 perfect physical addresses */
-#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
-#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
-#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
-
-#define ALL 0 /* Clear out all the setup frame */
-#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
-
-/*
-** Adapter state
-*/
-#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
-#define CLOSED 1 /* Ready for opening */
-#define OPEN 2 /* Running */
-
-/*
-** Various wait times
-*/
-#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
-#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
-
-/*
-** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
-** the vendors seem split 50-50 on how to calculate the OUI register values
-** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
-*/
-#define NATIONAL_TX 0x2000
-#define BROADCOM_T4 0x03e0
-#define SEEQ_T4 0x0016
-#define CYPRESS_T4 0x0014
-
-/*
-** Speed Selection stuff
-*/
-#define SET_10Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-#define SET_100Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- int fdx=0;\
- if (lp->phy[lp->active].id == NATIONAL_TX) {\
- mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
- 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
- if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- if (fdx) omr |= OMR_FDX;\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/* FIX ME so I don't jam 10Mb networks */
-#define SET_100Mb_PDET {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
- lp->cache.gep = (GEP_FDXD | GEP_MODE);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct de4x5_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
-#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
-/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
-#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
-#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
-#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
-#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
-#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
-#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
-#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
-#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
-
-#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
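The block just removed defined the driver's private ioctl interface: a struct de4x5_ioctl header (command code, buffer length, user pointer) plus the recognised command numbers. For orientation, here is a minimal, hypothetical userspace sketch of driving that interface; the SIOCDEVPRIVATE plumbing and the placement of the struct at the start of the ifreq union are assumptions for illustration, not taken from this diff, and the interface disappears with the driver.

    /* Hypothetical userspace sketch: read the OMR through the private
     * ioctl interface defined above (assumes the driver serviced
     * SIOCDEVPRIVATE and read struct de4x5_ioctl from &ifr.ifr_ifru). */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    struct de4x5_ioctl {
            unsigned short cmd;     /* e.g. 0x0c == DE4X5_GET_OMR */
            unsigned short len;     /* length of the data buffer  */
            unsigned char *data;    /* pointer to the data buffer */
    };

    int main(void)
    {
            unsigned char omr;
            struct de4x5_ioctl ioc = { .cmd = 0x0c, .len = sizeof(omr),
                                       .data = &omr };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            memcpy(&ifr.ifr_ifru, &ioc, sizeof(ioc));
            if (fd >= 0 && ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
                    printf("OMR = 0x%02x\n", omr);
            if (fd >= 0)
                    close(fd);
            return 0;
    }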
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..2d3bd343b6e6 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -745,7 +745,7 @@ static int dmfe_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop DM910X board */
dw32(DCR0, DM910X_RESET);
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1115,7 +1115,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void dmfe_timer(struct timer_list *t)
{
- struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct dmfe_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
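The dmfe.c hunks above are part of a tree-wide rename with no behavioural change: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(). A minimal sketch of the resulting pattern, using made-up names (my_board, my_poll) purely for illustration:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_board {
            struct timer_list timer;
            /* ... device state ... */
    };

    static void my_poll(struct timer_list *t)
    {
            /* Recover the enclosing structure from the timer pointer. */
            struct my_board *db = timer_container_of(db, t, timer);

            /* ... poll the hardware ... */
            mod_timer(&db->timer, jiffies + HZ);    /* re-arm */
    }

    static void my_start(struct my_board *db)
    {
            timer_setup(&db->timer, my_poll, 0);
            mod_timer(&db->timer, jiffies + HZ);
    }

    static void my_stop(struct my_board *db)
    {
            /* Waits for a running callback to finish before returning. */
            timer_delete_sync(&db->timer);
    }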
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ba0a69b363f8..71ff9e6db209 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -13,7 +13,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
@@ -117,8 +117,8 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp)
0x00, 0x06 /* ttm bit map */
};
- tp->mtable = kmalloc(sizeof(struct mediatable) +
- sizeof(struct medialeaf), GFP_KERNEL);
+ tp->mtable = devm_kmalloc(&tp->pdev->dev, sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
if (tp->mtable == NULL)
return; /* Horrible, impossible failure. */
@@ -224,7 +224,8 @@ subsequent_board:
return;
}
- mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);
+ mtable = devm_kmalloc(&tp->pdev->dev, struct_size(mtable, mleaf, count),
+ GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
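The eeprom.c hunks switch the media-table allocations from kmalloc() to devm_kmalloc() bound to &tp->pdev->dev, so they are freed automatically when the PCI device is unbound; this is why the kfree(tp->mtable) calls vanish from the tulip_core.c teardown paths later in this patch. A minimal sketch of the devres idea, with hypothetical names:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_state { int configured; };

    /* Sketch: device-managed allocation in a hypothetical probe. */
    static int my_probe(struct device *dev)
    {
            struct my_state *st;

            st = devm_kmalloc(dev, sizeof(*st), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;

            /* No kfree() needed in error paths or in remove():
             * devres releases 'st' automatically when 'dev' unbinds. */
            st->configured = 1;
            return 0;
    }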
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 54560f9a1651..0a12cb9b3ba7 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -104,7 +104,7 @@ int tulip_refill_rx(struct net_device *dev)
void oom_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, oom_timer);
+ struct tulip_private *tp = timer_container_of(tp, t, oom_timer);
napi_schedule(&tp->napi);
}
@@ -699,8 +699,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tulip_start_rxtx(tp);
}
/*
- * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
- * call is ever done under the spinlock
+ * NB: t21142_lnk_change() does a timer_delete_sync(), so be careful
+ * if this call is ever done under the spinlock
*/
if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
if (tp->link_change)
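The reworded comment keeps the same constraint: t21142_lnk_change() calls timer_delete_sync(), which blocks until any in-flight timer callback completes, so invoking it while holding a lock the callback also takes would self-deadlock. An illustrative sketch of the broken pattern being warned against (not code from this driver):

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    struct my_priv {
            spinlock_t lock;
            struct timer_list timer;
    };

    /* BROKEN pattern the comment above warns against. */
    static void my_broken_stop(struct my_priv *tp)
    {
            unsigned long flags;

            spin_lock_irqsave(&tp->lock, flags);
            /* Blocks until a running callback returns -- but if the
             * callback itself takes tp->lock, it can never return while
             * we hold the lock here: a self-deadlock. */
            timer_delete_sync(&tp->timer);
            spin_unlock_irqrestore(&tp->lock, flags);
    }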
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 3fb39e32e1b4..1de5ed967070 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -21,7 +21,7 @@ void pnic_do_nway(struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 phy_reg = ioread32(ioaddr + 0xB8);
- u32 new_csr6 = tp->csr6 & ~0x40C40200;
+ u32 new_csr6;
if (phy_reg & 0x78000000) { /* Ignore baseT4 */
if (phy_reg & 0x20000000) dev->if_port = 5;
@@ -86,7 +86,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
void pnic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 72a09156b48b..39c410bf224e 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -78,7 +78,7 @@
void pnic2_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -323,7 +323,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 2)
netdev_dbg(dev, "Ugh! Link blew?\n");
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -348,7 +348,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed then try doing an nway to get in sync */
if ((csr12 & 2) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
@@ -372,7 +372,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* if failed, try doing an nway to get in sync */
if ((csr12 & 4) && ! tp->medialock) {
- del_timer_sync(&tp->timer);
+ timer_delete_sync(&tp->timer);
pnic2_start_nway(dev);
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 642e9dfc5451..ca0c509b601c 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -139,7 +139,7 @@ void tulip_media_task(struct work_struct *work)
void mxic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -156,7 +156,7 @@ void mxic_timer(struct timer_list *t)
void comet_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 0ed598dc7569..5e010e1fa6f7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -23,7 +23,7 @@
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[];
+ struct medialeaf mleaf[] __counted_by(leafcount);
};
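The __counted_by(leafcount) annotation tells the compiler (and CONFIG_FORTIFY_SOURCE / CONFIG_UBSAN_BOUNDS) that the flexible array mleaf[] holds exactly leafcount elements, enabling runtime bounds checks on accesses. It pairs naturally with the struct_size() allocation visible in the eeprom.c hunk above. A generic sketch with hypothetical names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct leaf { int media; };

    struct table {
            int leafcount;
            struct leaf mleaf[] __counted_by(leafcount);
    };

    static struct table *table_alloc(int count)
    {
            /* struct_size() computes sizeof(struct table) +
             * count * sizeof(struct leaf), with overflow checking. */
            struct table *t = kmalloc(struct_size(t, mleaf, count),
                                      GFP_KERNEL);

            if (t)
                    t->leafcount = count;   /* must match the allocation
                                             * before mleaf[] is touched */
            return t;
    }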
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 79df5a72877b..b608585f1954 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -23,7 +23,7 @@
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
@@ -114,7 +114,7 @@ int tulip_debug = 1;
static void tulip_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (netif_running(dev))
@@ -747,9 +747,9 @@ static void tulip_down (struct net_device *dev)
napi_disable(&tp->napi);
#endif
- del_timer_sync (&tp->timer);
+ timer_delete_sync(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- del_timer_sync (&tp->oom_timer);
+ timer_delete_sync(&tp->oom_timer);
#endif
spin_lock_irqsave (&tp->lock, flags);
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1389,7 +1387,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* And back to business
*/
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) {
pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
return i;
@@ -1398,7 +1396,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
- dev = alloc_etherdev (sizeof (*tp));
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
if (!dev)
return -ENOMEM;
@@ -1408,18 +1406,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev),
(unsigned long long)pci_resource_len (pdev, 0),
(unsigned long long)pci_resource_start (pdev, 0));
- goto err_out_free_netdev;
+ return -ENODEV;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, DRV_NAME))
- goto err_out_free_netdev;
+ if (pcim_request_all_regions(pdev, DRV_NAME))
+ return -ENODEV;
- ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+ ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
if (!ioaddr)
- goto err_out_free_res;
+ return -ENODEV;
/*
* initialize private data structure 'tp'
@@ -1428,12 +1426,12 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = dma_alloc_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma, GFP_KERNEL);
+ tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
- goto err_out_mtable;
+ return -ENODEV;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
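Replacing the #ifdef CONFIG_TULIP_MWI guard with IS_ENABLED() (and dropping the guards around tulip_mwi_config() above) means the function is now parsed and type-checked in every configuration; the compiler still discards the call when the option is off, because IS_ENABLED() folds to a constant 0 or 1. A generic sketch of the idiom:

#include <linux/kconfig.h>
#include <linux/pci.h>

static void sketch_maybe_mwi(struct pci_dev *pdev)
{
        /* Folds to "if (0)" or "if (1)" at compile time: the dead
         * branch is eliminated yet still has to compile, catching
         * bit-rot that an #ifdef would hide.
         */
        if (IS_ENABLED(CONFIG_TULIP_MWI))
                pci_try_set_mwi(pdev);  /* best-effort, failure ignored */
}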
@@ -1553,7 +1550,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
sa_offset = 0;
- /* Ensure our media table fixup get's applied */
+ /* Ensure our media table fixup gets applied */
memcpy(ee_data + 16, ee_data, 8);
}
#endif
@@ -1689,12 +1686,13 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
- netif_napi_add(dev, &tp->napi, tulip_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
#endif
dev->ethtool_ops = &ops;
- if (register_netdev(dev))
- goto err_out_free_ring;
+ i = register_netdev(dev);
+ if (i)
+ return i;
pci_set_drvdata(pdev, dev);
@@ -1769,23 +1767,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tulip_set_power_state (tp, 0, 1);
return 0;
-
-err_out_free_ring:
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
-
-err_out_mtable:
- kfree (tp->mtable);
- pci_iounmap(pdev, ioaddr);
-
-err_out_free_res:
- pci_release_regions (pdev);
-
-err_out_free_netdev:
- free_netdev (dev);
- return -ENODEV;
}
@@ -1885,24 +1866,11 @@ static int __maybe_unused tulip_resume(struct device *dev_d)
static void tulip_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct tulip_private *tp;
if (!dev)
return;
- tp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
- kfree (tp->mtable);
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
- pci_disable_device(pdev);
-
- /* pci_power_off (pdev, -1); */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..6e4d8d31aba9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -656,7 +656,7 @@ static int uli526x_stop(struct net_device *dev)
netif_stop_queue(dev);
/* deleted timer */
- del_timer_sync(&db->timer);
+ timer_delete_sync(&db->timer);
/* Reset & stop ULI526X board */
uw32(DCR0, ULI526X_RESET);
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1014,7 +1014,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void uli526x_timer(struct timer_list *t)
{
- struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct uli526x_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
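from_timer() has been renamed timer_container_of(); it is container_of() specialized for timer callbacks, recovering the structure that embeds the timer_list from the pointer the timer core passes in. A sketch with a hypothetical board struct:

#include <linux/timer.h>

struct sketch_board {
        struct timer_list timer;
        int link_ok;            /* hypothetical state, for illustration */
};

static void sketch_timer_fn(struct timer_list *t)
{
        /* Expands to container_of(t, struct sketch_board, timer). */
        struct sketch_board *db = timer_container_of(db, t, timer);

        db->link_ok = 1;
        mod_timer(&db->timer, jiffies + HZ);    /* re-arm in one second */
}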
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..a24a25a5f73d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
@@ -765,7 +763,7 @@ static inline void update_csr6(struct net_device *dev, int new)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
@@ -1376,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1511,7 +1509,7 @@ static int netdev_close(struct net_device *dev)
}
#endif /* __i386__ debugging only */
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
free_rxtx_rings(np);
free_ringdesc(np);
@@ -1562,7 +1560,7 @@ static int __maybe_unused w840_suspend(struct device *dev_d)
rtnl_lock();
if (netif_running (dev)) {
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
spin_lock_irq(&np->lock);
netif_device_detach(dev);
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 8759f9f76b62..e5d2ede13845 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -143,7 +143,7 @@ static const struct pci_device_id xircom_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
-static struct pci_driver xircom_ops = {
+static struct pci_driver xircom_driver = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
@@ -1169,4 +1169,4 @@ investigate_write_descriptor(struct net_device *dev,
}
}
-module_pci_driver(xircom_ops);
+module_pci_driver(xircom_driver);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..846d58c769ea 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -41,7 +41,7 @@ module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
-module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */
+module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,21 +138,27 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
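The #ifdef MEM_MAPPING compile-time switch becomes a per-device runtime flag: is_support_rmon_mmio() keys the decision off the exact vendor/device/revision at probe, so one binary now drives boards with and without the memory-mapped RMON counter block. Sketched use of the flag on a later register pass, with assumed names:

#include <linux/io.h>

struct sketch_priv {
        bool rmon_enable;       /* set once in probe from PCI IDs */
};

static void sketch_ack_rmon(struct sketch_priv *np, void __iomem *ioaddr)
{
        int off;

        /* The RMON block at 0x100-0x150 is read-to-clear; touch it
         * only when the probe-time flag says this revision maps it.
         */
        if (!np->rmon_enable)
                return;
        for (off = 0x100; off <= 0x150; off += 4)
                readl(ioaddr + off);
}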
@@ -249,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
- /* default Auto-Negotiation for fiber deivices */
+ /* default Auto-Negotiation for fiber devices */
if (np->an_enable == 2) {
np->an_enable = 1;
}
@@ -287,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -352,7 +364,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
@@ -496,25 +508,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
+ dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (!skb) {
- free_list(dev);
- return -ENOMEM;
- }
+ if (!skb)
+ goto err_free_list;
+
+ addr = dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pdev->dev, addr))
+ goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
+ np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
+
+err_kfree_skb:
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+err_free_list:
+ free_list(dev);
+ return -ENOMEM;
}
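The rework above closes a long-standing gap: dma_map_single() can fail (IOMMU exhaustion, bounce-buffer pressure), and its result must be checked with dma_mapping_error() before the address is handed to hardware; on failure the current skb is freed and the partially built ring unwound. The checked pattern in isolation, assuming dev/skb/len are prepared by the caller:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int sketch_map_rx(struct device *dev, struct sk_buff *skb,
                         size_t len, dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, skb->data, len,
                                         DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM; /* caller frees skb, unwinds ring */

        *out = addr;
        return 0;
}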
static void rio_hw_init(struct net_device *dev)
@@ -565,8 +586,7 @@ static void rio_hw_init(struct net_device *dev)
* too. However, it doesn't work on IP1000A so we use 16-bit access.
*/
for (i = 0; i < 3; i++)
- dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
+ dw16(StationAddr0 + 2 * i, get_unaligned_le16(&dev->dev_addr[2 * i]));
set_multicast (dev);
if (np->coalesce) {
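Casting dev->dev_addr to (const u16 *) punned a byte array, and feeding the result through cpu_to_le16() into a write helper that converts again risked a double byte-swap on big-endian hosts; get_unaligned_le16() instead assembles the 16-bit little-endian value byte by byte, correct for any alignment and host endianness. A sketch (header location as in current kernels; older trees used <asm/unaligned.h>):

#include <linux/unaligned.h>
#include <linux/io.h>

/* Program a 6-byte MAC into three 16-bit little-endian registers. */
static void sketch_set_mac(void __iomem *ioaddr, const u8 *mac)
{
        int i;

        for (i = 0; i < 3; i++)
                writew(get_unaligned_le16(mac + 2 * i), ioaddr + 2 * i);
}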
@@ -577,7 +597,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -649,7 +670,7 @@ static int rio_open(struct net_device *dev)
static void
rio_timer (struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
@@ -712,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
@@ -814,7 +835,6 @@ rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->old_tx % TX_RING_SIZE;
- int tx_use = 0;
unsigned long flag = 0;
if (irq)
@@ -839,7 +859,6 @@ rio_free_tx (struct net_device *dev, int irq)
np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
- tx_use++;
}
if (irq)
spin_unlock(&np->tx_lock);
@@ -868,8 +887,7 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
- /* Ttransmit Underrun */
+ /* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
@@ -905,9 +923,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -949,15 +973,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
@@ -1056,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status)
get_stats (dev);
}
- /* PCI Error, a catastronphic error related to the bus interface
+ /* PCI Error, a catastrophic error related to the bus interface
occurs, set GlobalReset and HostReset to reset. */
if (int_status & HostError) {
printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
@@ -1072,11 +1099,10 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1085,7 +1111,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
@@ -1117,15 +1143,18 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
@@ -1134,9 +1163,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1172,10 +1198,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1235,8 +1260,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
@@ -1781,7 +1806,7 @@ rio_close (struct net_device *dev)
rio_hw_stop(dev);
free_irq(pdev->irq, dev);
- del_timer_sync (&np->timer);
+ timer_delete_sync(&np->timer);
free_list(dev);
@@ -1801,9 +1826,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
@@ -1821,7 +1845,7 @@ static int rio_suspend(struct device *device)
return 0;
netif_device_detach(dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
rio_hw_stop(dev);
return 0;
@@ -1845,7 +1869,7 @@ static int rio_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS (&rio_pm_ops)
#else
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..9ebf7a6db93e 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -270,7 +270,7 @@ enum _pcs_reg {
PCS_ESR = 15,
};
-/* IEEE Extened Status Register */
+/* IEEE Extended Status Register */
enum _mii_esr {
MII_ESR_1000BX_FD = 0x8000,
MII_ESR_1000BX_HD = 0x4000,
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
/* Ioctl custom data */
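Annotating the SROM fields __le16/__le32 documents that the EEPROM stores little-endian data and lets sparse flag any access that skips the conversion; it is what forces the le16_to_cpu() added to the led_mode read above. A sketch of the pairing:

#include <linux/types.h>
#include <asm/byteorder.h>

struct sketch_srom {
        __le16 led_mode;        /* little-endian as stored in EEPROM */
};

static u16 sketch_led_mode(const struct sketch_srom *s)
{
        /* A no-op on LE hosts, a byte swap on BE hosts; omitting it
         * is now a sparse endianness warning, not a silent bug.
         */
        return le16_to_cpu(s->led_mode);
}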
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+ /* To ensure synchronization when stats are updated. */
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
@@ -401,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c710dc17be90..277c50ef773f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
struct netdev_desc {
__le32 next_desc;
__le32 status;
- struct desc_frag { __le32 addr, length; } frag[1];
+ struct desc_frag { __le32 addr, length; } frag;
};
/* Bits in netdev_desc.status */
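The descriptor's frag[1] was a one-element trailing array, a pre-C99 idiom that modern array-bounds checking (FORTIFY_SOURCE, -Warray-bounds) treats with suspicion; since exactly one fragment is ever used, a plain member has identical layout and every frag[0].addr becomes frag.addr. Before and after, sketched:

/* Before: looked variable-length, indexed as frag[0].addr */
struct desc_old {
        __le32 next_desc;
        __le32 status;
        struct { __le32 addr, length; } frag[1];
};

/* After: same size and layout, honest single member, frag.addr */
struct desc_new {
        __le32 next_desc;
        __le32 status;
        struct { __le32 addr, length; } frag;
};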
@@ -708,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
{
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
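dev->mtu can be read locklessly on hot paths, so the writer now uses WRITE_ONCE() to mark the intentional data race for KCSAN and to prevent store tearing; it pairs with READ_ONCE() on the reader side. Both halves of the pairing, sketched:

#include <linux/compiler.h>
#include <linux/netdevice.h>

static void sketch_set_mtu(struct net_device *dev, int new_mtu)
{
        WRITE_ONCE(dev->mtu, new_mtu);  /* tear-free store, under RTNL */
}

static unsigned int sketch_get_mtu(const struct net_device *dev)
{
        return READ_ONCE(dev->mtu);     /* lockless reader side */
}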
@@ -942,7 +942,7 @@ static void check_duplex(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii_if.dev;
void __iomem *ioaddr = np->base;
int next_tick = 10*HZ;
@@ -980,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
- le32_to_cpu(np->tx_ring[i].frag[0].length));
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
}
printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
ioread32(np->base + TxListPtr),
@@ -1027,28 +1027,29 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
- np->rx_ring[i].frag[0].length = 0;
+ np->rx_ring[i].frag.length = 0;
np->rx_skbuff[i] = NULL;
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t addr;
+
struct sk_buff *skb =
netdev_alloc_skb(dev, np->rx_buf_sz + 2);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag[0].addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag[0].addr)) {
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
- np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[i].frag.addr = cpu_to_le32(addr);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1088,6 +1089,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
struct netdev_desc *txdesc;
+ dma_addr_t addr;
unsigned entry;
/* Calculate the next Tx descriptor entry. */
@@ -1095,14 +1097,15 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
+ addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr))
+ goto drop_frame;
+
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
- skb->data, skb->len, DMA_TO_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag[0].addr))
- goto drop_frame;
- txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+ txdesc->frag.addr = cpu_to_le32(addr);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
np->cur_tx++;
@@ -1151,7 +1154,7 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
@@ -1271,12 +1274,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
} else {
@@ -1290,12 +1293,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
}
@@ -1372,16 +1375,16 @@ static void rx_poll(struct tasklet_struct *t)
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
@@ -1414,12 +1417,13 @@ static void refill_rx (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int entry;
- int cnt = 0;
/* Refill the Rx ring buffers. */
for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
struct sk_buff *skb;
+ dma_addr_t addr;
+
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
@@ -1427,21 +1431,20 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag[0].addr = cpu_to_le32(
- dma_map_single(&np->pci_dev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
- if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag[0].addr)) {
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
+
+ np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
}
/* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag[0].length =
+ np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
- cnt++;
}
}
static void netdev_error(struct net_device *dev, int intr_status)
@@ -1644,8 +1647,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
@@ -1870,21 +1873,21 @@ static int netdev_close(struct net_device *dev)
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
- np->tx_ring[i].frag[0].length);
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
- np->rx_ring[i].frag[0].length);
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
}
}
#endif /* __i386__ debugging only */
free_irq(np->pci_dev->irq, dev);
- del_timer_sync(&np->timer);
+ timer_delete_sync(&np->timer);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
@@ -1892,19 +1895,19 @@ static int netdev_close(struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ le32_to_cpu(np->rx_ring[i].frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
- np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..0de3cd660ec8 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* free the buffer */
dev_kfree_skb(skb);
- spin_unlock_irqrestore(&bp->lock, flags);
-
return NETDEV_TX_OK;
}
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
@@ -841,7 +841,7 @@ err_out_free_dev:
return err;
}
-static int dnet_remove(struct platform_device *pdev)
+static void dnet_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -859,8 +859,6 @@ static int dnet_remove(struct platform_device *pdev)
free_irq(dev->irq, dev);
free_netdev(dev);
}
-
- return 0;
}
static struct platform_driver dnet_driver = {
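platform_driver .remove callbacks were converted tree-wide from int to void: the driver core always ignored the return value, so returning an error could only log a warning while still leaking whatever was not freed; dropping the dead "return 0;" completes the conversion here. The new-style shape, sketched:

#include <linux/platform_device.h>
#include <linux/netdevice.h>

static void sketch_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        /* Tear-down must be unconditional -- there is no error
         * path the core could act on anyway.
         */
        unregister_netdev(dev);
        free_netdev(dev);
}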
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46e3a05e9582..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
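hrtimer_setup() folds hrtimer_init() plus the separate .function assignment into one call, so a timer can no longer be started with its callback unset. A sketch with a placeholder 100 ms interval:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart sketch_poll(struct hrtimer *t)
{
        /* ... service the device ... */
        return HRTIMER_NORESTART;
}

static void sketch_arm(struct hrtimer *timer)
{
        /* Base, clock, mode and callback in a single call. */
        hrtimer_setup(timer, sketch_poll, CLOCK_MONOTONIC,
                      HRTIMER_MODE_REL);
        hrtimer_start(timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}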
@@ -558,7 +557,6 @@ err_unmap:
err_release_regions:
pci_release_regions(dev);
err_disable_dev:
- pci_clear_master(dev);
pci_disable_device(dev);
return err;
@@ -577,7 +575,6 @@ static void ec_bhf_remove(struct pci_dev *dev)
free_netdev(net_dev);
pci_release_regions(dev);
- pci_clear_master(dev);
pci_disable_device(dev);
}
@@ -592,5 +589,6 @@ module_pci_driver(pci_driver);
module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+MODULE_DESCRIPTION("Beckhoff CX5020 EtherCAT Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8689d4a51fe5..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -101,8 +101,7 @@
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
-#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB 255u
@@ -563,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
@@ -967,9 +966,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
-bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
-u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
void be_eqd_update(struct be_adapter *adapter, bool force_update);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 528eb0f223b1..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -135,7 +135,8 @@ static int be_mcc_notify(struct be_adapter *adapter)
/* To check if valid bit is set, check the entire word as we don't know
* the endianness of the data (old entry is host endian while a new entry is
- * little endian) */
+ * little endian)
+ */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
u32 flags;
@@ -248,7 +249,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
u8 opcode = 0, subsystem = 0;
/* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
+ * from mcc_wrb
+ */
be_dws_le_to_cpu(compl, 4);
base_status = base_status(compl->status);
@@ -573,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -587,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -657,8 +659,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
return 0;
}
-/*
- * Insert the mailbox address into the doorbell in two steps
+/* Insert the mailbox address into the doorbell in two steps
* Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_notify_wait(struct be_adapter *adapter)
@@ -802,7 +803,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
- fill_wrb_tags(wrb, (ulong) req_hdr);
+ fill_wrb_tags(wrb, (ulong)req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -832,8 +833,8 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
- struct be_mcc_wrb *wrb
- = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+ struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+
memset(wrb, 0, sizeof(*wrb));
return wrb;
}
@@ -865,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -876,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
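Converting mcc_lock from a mutex to a spinlock taken with spin_lock_bh() makes the command path safe to enter from contexts that cannot sleep and excludes the MCC completion bottom half on the local CPU. It is also why the polling loop above trades usleep_range() for udelay(100) with a 120000-iteration cap (the same ~12 s budget): sleeping under a spinlock is forbidden. The locking shape, sketched:

#include <linux/spinlock.h>

struct sketch_adapter {
        spinlock_t mcc_lock;    /* serializes MCC commands, BH-safe */
};

static void sketch_post_cmd(struct sketch_adapter *a)
{
        spin_lock_bh(&a->mcc_lock);
        /* build and post a WRB -- no sleeping allowed in here */
        spin_unlock_bh(&a->mcc_lock);
}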
@@ -896,7 +897,7 @@ static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
memcpy(dest_wrb, wrb, sizeof(*wrb));
if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
- fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+ fill_wrb_tags(dest_wrb, (ulong)embedded_payload(wrb));
return dest_wrb;
}
@@ -1046,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1075,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1087,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1112,9 +1113,9 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
- if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
@@ -1130,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1150,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1413,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1443,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1507,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1524,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1592,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1608,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -1620,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1636,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1659,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1696,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1735,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1746,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1761,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1803,17 +1804,17 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
- get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
get_fat_cmd.size,
&get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
- buf_size = min(total_size, (u32)60*1024);
+ buf_size = min(total_size, (u32)60 * 1024);
total_size -= buf_size;
wrb = wrb_from_mccq(adapter);
@@ -1848,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1861,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1878,13 +1879,13 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1898,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1921,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1948,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1970,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1981,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2014,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2045,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2065,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2084,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2107,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2188,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2213,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2225,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2246,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2257,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2281,13 +2282,13 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2305,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2321,13 +2322,13 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2344,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2373,7 +2374,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2386,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2405,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2415,7 +2416,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,11 +2441,11 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2459,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2473,11 +2474,11 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2490,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2524,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2536,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2561,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2572,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2583,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2610,11 +2611,15 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, add a parsing-skipped space to keep it from
+ * ever being matched on storage holding this source file.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3216,7 +3221,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3233,7 +3238,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3248,7 +3253,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3271,7 +3276,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3280,7 +3285,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3297,7 +3302,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3323,7 +3328,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3331,7 +3336,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3347,7 +3352,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3362,7 +3367,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
for (i = 0; i < byte_cnt; i++) {
- req->snd_buff[i] = (u8)(pattern >> (j*8));
+ req->snd_buff[i] = (u8)(pattern >> (j * 8));
j++;
if (j > 7)
j = 0;
@@ -3381,7 +3386,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3392,7 +3397,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3408,7 +3413,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3423,7 +3428,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3468,7 +3473,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3478,7 +3483,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3498,7 +3503,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3610,7 +3615,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3642,7 +3647,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3654,7 +3659,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3674,7 +3679,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3706,7 +3711,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3770,7 +3775,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3830,7 +3835,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3846,13 +3851,13 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req->hdr.domain = domain;
req->mac_count = mac_count;
if (mac_count)
- memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+ memcpy(req->mac, mac_array, ETH_ALEN * mac_count);
status = be_mcc_notify_wait(adapter);
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -3888,7 +3893,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3929,7 +3934,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3943,7 +3948,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3990,7 +3995,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4189,7 +4194,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4205,7 +4210,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4683,7 +4688,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4700,7 +4705,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4734,7 +4739,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4755,7 +4760,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4849,7 +4854,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4867,7 +4872,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4940,7 +4945,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4968,7 +4973,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4999,8 +5004,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5038,7 +5042,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5052,7 +5056,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5075,7 +5079,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
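All of the be_cmds.c hunks above are the same mechanical conversion: adapter->mcc_lock changes from a mutex to a spinlock taken with bottom halves disabled, so the MCC command path can be entered from non-sleeping contexts, and nothing inside the critical section may sleep anymore (the be_cmd_set_mac_list hunk accordingly moves dma_free_coherent() outside the lock, presumably because it may sleep). A hedged sketch of the post-conversion pattern; be_cmd_example is hypothetical, the helpers are the driver's own:

    static int be_cmd_example(struct be_adapter *adapter)
    {
            struct be_mcc_wrb *wrb;
            int status = 0;

            spin_lock_bh(&adapter->mcc_lock);       /* was mutex_lock() */

            wrb = wrb_from_mccq(adapter);
            if (!wrb) {
                    status = -EBUSY;
                    goto err;
            }
            /* ... embed and fill the request, then post it ... */
            status = be_mcc_notify_wait(adapter);

    err:
            spin_unlock_bh(&adapter->mcc_lock);     /* was mutex_unlock() */
            return status;
    }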
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db1f3b908582..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
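__nonstring marks cookie[] as a raw byte array rather than a NUL-terminated C string, so newer compilers do not warn when it is initialized or copied without a terminator. A minimal illustration (the magic value is hypothetical; in the kernel, __nonstring expands to GCC's nonstring attribute):

    #define __nonstring __attribute__((nonstring))

    /* exactly 4 bytes of magic, deliberately without a trailing NUL */
    static const char magic[4] __nonstring = "BEFS";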
@@ -2381,7 +2381,6 @@ struct be_cmd_req_manage_iface_filters {
} __packed;
u16 be_POST_stage_get(struct be_adapter *adapter);
-int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
@@ -2406,7 +2405,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
u8 *link_status, u32 dom);
-int be_cmd_reset(struct be_adapter *adapter);
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -2427,7 +2425,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
@@ -2488,7 +2486,6 @@ int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
int lancer_initiate_dump(struct be_adapter *adapter);
int lancer_delete_dump(struct be_adapter *adapter);
bool dump_present(struct be_adapter *adapter);
-int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9955308b93d..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_rx_stats *stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
@@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_tx_stats *stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync_compl);
+ start = u64_stats_fetch_begin(&stats->sync_compl);
data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
+ } while (u64_stats_fetch_retry(&stats->sync_compl, start));
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
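These hunks follow the tree-wide removal of the _irq u64_stats helpers: the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() pair is now sufficient on all configurations. The reader-side pattern, sketched with the driver's own field names:

    unsigned int start;
    u64 bytes, pkts;

    do {
            start = u64_stats_fetch_begin(&stats->sync);
            bytes = stats->rx_bytes;
            pkts = stats->rx_pkts;
    } while (u64_stats_fetch_retry(&stats->sync, start));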
@@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev,
}
static void be_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1071,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1102,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1117,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1130,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1193,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1269,43 +1264,45 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
-static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int be_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
struct rss_info *rss = &adapter->rss_info;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
- indir[i] = rss->rss_queue[i];
+ rxfh->indir[i] = rss->rss_queue[i];
}
- if (hkey)
- memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->rss_hkey, RSS_HASH_KEY_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int be_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
+ u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
struct be_rx_obj *rxo;
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
- j = indir[i];
+ j = rxfh->indir[i];
rxo = &adapter->rx_obj[j];
rsstable[i] = rxo->rss_id;
adapter->rss_info.rss_queue[i] = j;
@@ -1342,7 +1339,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1360,25 +1357,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
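The rework above replaces a whole-page read plus an in-place memcpy shuffle with offset/length-aware reads that split a request across SFF-8472 pages A0 and A2. A standalone model of the split arithmetic (PAGE_DATA_LEN assumed to be 256, the usual SFF-8472 page size):

    #include <stdio.h>

    #define PAGE_DATA_LEN 256u

    static void split_eeprom_read(unsigned int offset, unsigned int len)
    {
            unsigned int begin = offset, end = offset + len;

            /* bytes below PAGE_DATA_LEN come from page A0 */
            if (begin < PAGE_DATA_LEN) {
                    unsigned int a0_end = end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN;

                    printf("read A0: off=%u len=%u\n", begin, a0_end - begin);
                    begin = PAGE_DATA_LEN;
            }
            /* the remainder comes from page A2 */
            if (end > PAGE_DATA_LEN)
                    printf("read A2: off=%u len=%u\n",
                           begin - PAGE_DATA_LEN, end - begin);
    }

    int main(void)
    {
            split_eeprom_read(200, 100);    /* straddles the A0/A2 boundary */
            return 0;
    }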
@@ -1438,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d51f24c9e1b8..5bb31c8fab39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -16,7 +16,6 @@
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
-#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
@@ -665,10 +664,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_rx_stats *rx_stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->sync);
+ start = u64_stats_fetch_begin(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&rx_stats->sync, start));
stats->rx_packets += pkts;
stats->rx_bytes += bytes;
stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -680,10 +679,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_tx_stats *tx_stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->sync);
+ start = u64_stats_fetch_begin(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&tx_stats->sync, start));
stats->tx_packets += pkts;
stats->tx_bytes += bytes;
}
@@ -737,9 +736,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -1125,7 +1124,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
unsigned int eth_hdr_len;
struct iphdr *ip;
@@ -1136,10 +1135,11 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN;
if (skb->len <= 60 &&
- (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
- is_ipv4_pkt(skb)) {
+ (lancer_chip(adapter) || BE3_chip(adapter) ||
+ skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb);
- pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len))))
+ goto tx_drop;
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
- struct sk_buff **skb)
+ struct sk_buff **skb,
+ struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1381,19 +1382,17 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
- if (unlikely(!wrb_cnt)) {
- dev_kfree_skb_any(skb);
- goto drop;
- }
+ if (unlikely(!wrb_cnt))
+ goto drop_skb;
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
- if (be_send_pkt_to_bmc(adapter, &skb)) {
+ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
- goto drop;
+ goto drop_skb;
else
skb_get(skb);
}
@@ -1407,6 +1406,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
be_xmit_flush(adapter, txo);
return NETDEV_TX_OK;
+drop_skb:
+ dev_kfree_skb_any(skb);
drop:
tx_stats(txo)->tx_drv_drops++;
/* Flush the already enqueued tx requests */
@@ -1465,10 +1466,10 @@ static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
ntohs(tcphdr->source));
dev_info(dev, "TCP dest port %d\n",
ntohs(tcphdr->dest));
- dev_info(dev, "TCP sequence num %d\n",
- ntohs(tcphdr->seq));
- dev_info(dev, "TCP ack_seq %d\n",
- ntohs(tcphdr->ack_seq));
+ dev_info(dev, "TCP sequence num %u\n",
+ ntohl(tcphdr->seq));
+ dev_info(dev, "TCP ack_seq %u\n",
+ ntohl(tcphdr->ack_seq));
} else if (ip_hdr(skb)->protocol ==
IPPROTO_UDP) {
udphdr = udp_hdr(skb);
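The tx_timeout hunk above fixes a genuine bug: TCP sequence and ack numbers are 32-bit fields, so converting them with ntohs() truncated them to 16 bits, and %d printed them signed. A standalone demonstration of the truncation:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t seq_be = htonl(0x12345678u);   /* network-order sequence number */

            printf("ntohl: 0x%08x\n", ntohl(seq_be));           /* 0x12345678 */
            printf("ntohs: 0x%08x\n", ntohs((uint16_t)seq_be)); /* 16-bit truncation */
            return 0;
    }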
@@ -2155,16 +2156,16 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
- start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
+ start = u64_stats_fetch_begin(&rxo->stats.sync);
rx_pkts += rxo->stats.rx_pkts;
- } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
- start = u64_stats_fetch_begin_irq(&txo->stats.sync);
+ start = u64_stats_fetch_begin(&txo->stats.sync);
tx_pkts += txo->stats.tx_reqs;
- } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&txo->stats.sync, start));
}
/* Skip, if wrapped around or first calculation */
@@ -2344,11 +2345,10 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
hdr_len = ETH_HLEN;
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
- skb_frag_set_page(skb, 0, page_info->page);
- skb_frag_off_set(&skb_shinfo(skb)->frags[0],
- page_info->page_offset + hdr_len);
- skb_frag_size_set(&skb_shinfo(skb)->frags[0],
- curr_frag_len - hdr_len);
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
+ page_info->page,
+ page_info->page_offset + hdr_len,
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -2370,16 +2370,17 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
if (page_info->page_offset == 0) {
/* Fresh page */
j++;
- skb_frag_set_page(skb, j, page_info->page);
- skb_frag_off_set(&skb_shinfo(skb)->frags[j],
- page_info->page_offset);
- skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
+ page_info->page,
+ page_info->page_offset,
+ curr_frag_len);
skb_shinfo(skb)->nr_frags++;
} else {
put_page(page_info->page);
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j],
+ curr_frag_len);
}
- skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
@@ -2452,14 +2453,16 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
if (i == 0 || page_info->page_offset == 0) {
/* First frag or Fresh page */
j++;
- skb_frag_set_page(skb, j, page_info->page);
- skb_frag_off_set(&skb_shinfo(skb)->frags[j],
- page_info->page_offset);
- skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
+ skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
+ page_info->page,
+ page_info->page_offset,
+ curr_frag_len);
} else {
put_page(page_info->page);
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j],
+ curr_frag_len);
}
- skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
+
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));
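Both fill paths now call skb_frag_fill_page_desc(), which initializes a frag's page, offset, and size in one step instead of the former three setters; a side-by-side sketch using the driver's variables:

    /* after: one call fills the whole frag descriptor */
    skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j], page_info->page,
                            page_info->page_offset, curr_frag_len);

    /* before: three setters, with the size accumulated separately */
    skb_frag_set_page(skb, j, page_info->page);
    skb_frag_off_set(&skb_shinfo(skb)->frags[j], page_info->page_offset);
    skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);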
@@ -2982,8 +2985,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
@@ -3178,7 +3180,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
@@ -3491,7 +3493,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3554,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
@@ -4030,8 +4032,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
@@ -4981,13 +4982,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
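nla_for_each_nested_type() folds the attribute-type filter and the minimum-length check of the removed open-coded loop into the iterator itself, so the body only ever sees well-formed IFLA_BRIDGE_MODE attributes. Sketch of the resulting shape:

    nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
            u16 mode = nla_get_u16(attr);

            /* ... reject unsupported bridge modes, otherwise apply ... */
    }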
@@ -5194,7 +5189,8 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -5203,7 +5199,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
+ netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@ -5671,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
@@ -5726,8 +5722,6 @@ static void be_remove(struct pci_dev *pdev)
be_unmap_pci_bars(adapter);
be_drv_cleanup(adapter);
- pci_disable_pcie_error_reporting(pdev);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -5840,20 +5834,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
SET_NETDEV_DEV(netdev, &pdev->dev);
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!status) {
- netdev->features |= NETIF_F_HIGHDMA;
- } else {
- status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (status) {
- dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
- goto free_netdev;
- }
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (!status)
- dev_info(&pdev->dev, "PCIe error reporting enabled\n");
-
status = be_map_pci_bars(adapter);
if (status)
goto free_netdev;
@@ -5898,7 +5883,6 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
- pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 000000000000..3df6bf476ae7
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ select PAGE_POOL
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 000000000000..b98135f65eb7
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
+ tsnep_rxnfc.o tsnep_xdp.o
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 000000000000..03e19aea9ea4
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+#include <net/xdp.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
+#define TSNEP_RING_RX_REFILL 16
+#define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4)
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ u32 type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ bool zc;
+ };
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct xsk_buff_pool *xsk_pool;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ union {
+ struct page *page;
+ struct xdp_buff *xdp;
+ };
+ size_t len;
+ dma_addr_t dma;
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+ int tx_queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct page_pool *page_pool;
+ struct page **page_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp_batch;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+ u32 alloc_failed;
+
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq_zc;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 16];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ int irq;
+ u32 irq_mask;
+ void __iomem *irq_delay_addr;
+ u8 irq_delay;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct kernel_hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
+ struct bpf_prog *xdp_prog;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool);
+void tsnep_disable_xsk(struct tsnep_queue *queue);
+
+#endif /* _TSNEP_H */
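Because TSNEP_RING_SIZE is a power of two, TSNEP_RING_MASK allows ring indices to wrap with a single AND instead of a modulo; a standalone model of the wrap:

    #include <assert.h>

    #define TSNEP_RING_SIZE 256
    #define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)

    int main(void)
    {
            unsigned int write = TSNEP_RING_SIZE - 1;

            write = (write + 1) & TSNEP_RING_MASK;  /* wraps 255 -> 0 */
            assert(write == 0);
            return 0;
    }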
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 000000000000..228a638eae16
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_alloc_failed",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_alloc_failed",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+ /* first queue pair is within TSNEP_MAC_SIZE; only queues additional to
+ * the first queue pair extend the register length by TSNEP_QUEUE_SIZE
+ */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ tsnep_stats.rx_alloc_failed += adapter->rx[i].alloc_failed;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ tsnep_rx_queue_stats.rx_alloc_failed =
+ adapter->rx[i].alloc_failed;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tsnep_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ ch->max_combined = adapter->num_queues;
+ ch->combined_count = adapter->num_queues;
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_tx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_rx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static int tsnep_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue;
+
+ queue = tsnep_get_queue_with_rx(adapter, 0);
+ if (queue)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ queue = tsnep_get_queue_with_tx(adapter, 0);
+ if (queue)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ int retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* RX coalesce has priority for queues with TX and RX */
+ if (adapter->queue[i].rx)
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->rx_coalesce_usecs);
+ else
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_ethtool_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_rx);
+
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_tx);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+ int retval;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_rx, ec->rx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ /* RX coalesce has priority for queues with TX and RX */
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx && !queue_with_tx->rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_tx, ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_channels = tsnep_ethtool_get_channels,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_coalesce = tsnep_ethtool_get_coalesce,
+ .set_coalesce = tsnep_ethtool_set_coalesce,
+ .get_per_queue_coalesce = tsnep_ethtool_get_per_queue_coalesce,
+ .set_per_queue_coalesce = tsnep_ethtool_set_per_queue_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
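tsnep_ethtool_get_strings() expands the "rx_%d_*" and "tx_%d_*" templates once per queue with snprintf(). The same expansion as a standalone model (string table abbreviated):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    int main(void)
    {
            static const char tmpl[][ETH_GSTRING_LEN] = {
                    "rx_%d_packets", "rx_%d_bytes",
            };
            char name[ETH_GSTRING_LEN];

            for (int q = 0; q < 2; q++) {
                    for (unsigned int j = 0; j < sizeof(tmpl) / sizeof(tmpl[0]); j++) {
                            snprintf(name, sizeof(name), tmpl[j], q);
                            printf("%s\n", name);
                    }
            }
            return 0;
    }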
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 000000000000..64c97eb66f67
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* counter */
+#define ECM_COUNTER_LOW 0x0028
+#define ECM_COUNTER_HIGH 0x002C
+
+/* interrupt delay */
+#define ECM_INT_DELAY 0x0030
+#define ECM_INT_DELAY_MASK 0xF0
+#define ECM_INT_DELAY_SHIFT 4
+#define ECM_INT_DELAY_BASE_US 16
+#define ECM_INT_DELAY_OFFSET 1
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_TX_TIME 0x00010000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_SIZE_DATA_AFTER_INLINE (64 - sizeof(struct tsnep_tx_desc) + \
+ sizeof_field(struct tsnep_tx_desc, tx))
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1;
+ __le64 counter;
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 counter;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 000000000000..b118407c30e8
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,2715 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks, e.g., for PLCs in industrial automation.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
+#include <net/xdp_sock_drv.h>
+
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
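+/* Example with common defaults (XDP_PACKET_HEADROOM = 256, NET_IP_ALIGN = 2):
+ * TSNEP_RX_OFFSET = 258 and TSNEP_HEADROOM = 260; the usable RX buffer is the
+ * rest of the page minus the tailroom reserved for struct skb_shared_info.
+ */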
+/* XSK buffer shall store at least a Q-in-Q frame */
+#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
+ ETH_FRAME_LEN + ETH_FCS_LEN + \
+ VLAN_HLEN * 2, 4))
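+/* Worked size: 16 bytes inline metadata + ETH_FRAME_LEN (1514) +
+ * ETH_FCS_LEN (4) + two VLAN headers (2 * 4) = 1542 bytes, aligned to 1544.
+ */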
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+
+#define TSNEP_COALESCE_USECS_DEFAULT 64
+#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
+ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
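+/* The maximum delay is the largest value of the delay register field times
+ * the base unit; the trailing "+ ECM_INT_DELAY_BASE_US - 1" additionally
+ * accepts requests up to just below the next step, because
+ * tsnep_set_irq_coalesce() rounds the requested value down.
+ */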
+
+/* mapping type */
+#define TSNEP_TX_TYPE_MAP BIT(0)
+#define TSNEP_TX_TYPE_MAP_PAGE BIT(1)
+#define TSNEP_TX_TYPE_INLINE BIT(2)
+/* buffer type */
+#define TSNEP_TX_TYPE_SKB BIT(8)
+#define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP)
+#define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(9)
+#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE)
+#define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE)
+#define TSNEP_TX_TYPE_XDP_TX BIT(10)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(11)
+#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
+#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
+
+#define TSNEP_XDP_TX BIT(0)
+#define TSNEP_XDP_REDIRECT BIT(1)
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ if (napi_schedule_prep(&adapter->queue[0].napi)) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&adapter->queue[0].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ if (napi_schedule_prep(&queue->napi)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&queue->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
+{
+ if (usecs > TSNEP_COALESCE_USECS_MAX)
+ return -ERANGE;
+
+ usecs /= ECM_INT_DELAY_BASE_US;
+ usecs <<= ECM_INT_DELAY_SHIFT;
+ usecs &= ECM_INT_DELAY_MASK;
+
+ queue->irq_delay &= ~ECM_INT_DELAY_MASK;
+ queue->irq_delay |= usecs;
+ iowrite8(queue->irq_delay, queue->irq_delay_addr);
+
+ return 0;
+}
+
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
+{
+ u32 usecs;
+
+ usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
+ usecs >>= ECM_INT_DELAY_SHIFT;
+ usecs *= ECM_INT_DELAY_BASE_US;
+
+ return usecs;
+}
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int speed;
+
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
+ }
+
+ return phy_loopback(adapter->phydev, enable, speed);
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_keee ethtool_keee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+ /* MAC supports only 100Mbps|1000Mbps full duplex
+ * SPE (Single Pair Ethernet) is also an option but not implemented yet
+ */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_create(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ entry->owner_user_flag = false;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_init(struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->write = 0;
+ tx->read = 0;
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_tx_enable(struct tsnep_tx *tx)
+{
+ struct netdev_queue *nq;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_wake_queue(nq);
+ __netif_tx_unlock_bh(nq);
+}
+
+static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
+{
+ struct netdev_queue *nq;
+ u32 val;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_stop_queue(nq);
+ __netif_tx_unlock_bh(nq);
+
+ /* wait until TX is done in hardware */
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ /* wait until TX is also done in software */
+ while (READ_ONCE(tx->read) != tx->write) {
+ napi_schedule(napi);
+ napi_synchronize(napi);
+ }
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ /* xdpf and zc are union with skb */
+ if (entry->skb) {
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+ * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+ if (entry->type & TSNEP_TX_TYPE_INLINE)
+ entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
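+/* One descriptor is always kept unused, so read == write unambiguously means
+ * an empty ring and at most TSNEP_RING_SIZE - 1 descriptors can be in use.
+ */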
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
+
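+/* Fragments up to TSNEP_DESC_SIZE_DATA_AFTER_INLINE bytes are copied directly
+ * into the descriptor ("data after descriptor") instead of being DMA mapped;
+ * for such small buffers a short memcpy is cheaper than a map/unmap cycle.
+ */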
+static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
+ struct device *dmadev, dma_addr_t *dma)
+{
+ unsigned int len;
+ int mapped;
+
+ len = skb_frag_size(frag);
+ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
+ *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, *dma))
+ return -ENOMEM;
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE;
+ mapped = 1;
+ } else {
+ void *fragdata = skb_frag_address_safe(frag);
+
+ if (likely(fragdata)) {
+ memcpy(&entry->desc->tx, fragdata, len);
+ } else {
+ struct page *page = skb_frag_page(frag);
+
+ fragdata = kmap_local_page(page);
+ memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag),
+ len);
+ kunmap_local(fragdata);
+ }
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE;
+ mapped = 0;
+ }
+
+ return mapped;
+}
+
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ int i, mapped;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+
+ if (!i) {
+ len = skb_headlen(skb);
+ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+ entry->type = TSNEP_TX_TYPE_SKB_MAP;
+ mapped = 1;
+ } else {
+ memcpy(&entry->desc->tx, skb->data, len);
+ entry->type = TSNEP_TX_TYPE_SKB_INLINE;
+ mapped = 0;
+ }
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
+ } else {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ len = skb_frag_size(frag);
+ mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma);
+ if (mapped < 0)
+ return mapped;
+ }
+
+ entry->len = len;
+ if (likely(mapped)) {
+ dma_unmap_addr_set(entry, dma, dma);
+ entry->desc->tx = __cpu_to_le64(dma);
+ }
+
+ map_len += len;
+ }
+
+ return map_len;
+}
+
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int map_len = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
+
+ if (entry->len) {
+ if (entry->type & TSNEP_TX_TYPE_MAP)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE)
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ map_len += entry->len;
+ entry->len = 0;
+ }
+ }
+
+ return map_len;
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
+ int length;
+ int retval;
+ int i;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ if (tsnep_tx_desc_available(tx) < count) {
+ /* ring is full; this shall not happen, because the queue is
+ * stopped below whenever it can become full
+ */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ return NETDEV_TX_OK;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+ struct skb_shared_info *shinfo, int count, u32 type)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct page *page;
+ skb_frag_t *frag;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ void *data;
+ int i;
+
+ frag = NULL;
+ len = xdpf->len;
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+ if (type & TSNEP_TX_TYPE_XDP_NDO) {
+ data = unlikely(frag) ? skb_frag_address(frag) :
+ xdpf->data;
+ dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag) :
+ virt_to_page(xdpf->data);
+ dma = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma += skb_frag_off(frag);
+ else
+ dma += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dmadev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ entry->type = TSNEP_TX_TYPE_XDP_TX;
+ }
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+
+ if (i + 1 < count) {
+ frag = &shinfo->frags[i];
+ len = skb_frag_size(frag);
+ }
+ }
+
+ return map_len;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+ struct tsnep_tx *tx, u32 type)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct tsnep_tx_entry *entry;
+ int count, length, retval, i;
+
+ count = 1;
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ count += shinfo->nr_frags;
+
+ /* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+ * descriptors are always kept available for the normal TX path, which
+ * stops the queue itself if necessary
+ */
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+ return false;
+
+ entry = &tx->entry[tx->write];
+ entry->xdpf = xdpf;
+
+ retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ entry->xdpf = NULL;
+
+ tx->dropped++;
+
+ return false;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ return true;
+}
+
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
+static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
+ u32 type;
+
+ if (unlikely(!xdpf))
+ return false;
+
+ /* no page pool for zero copy */
+ if (zc)
+ type = TSNEP_TX_TYPE_XDP_NDO;
+ else
+ type = TSNEP_TX_TYPE_XDP_TX;
+
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+ txq_trans_cond_update(tx_nq);
+
+ __netif_tx_unlock(tx_nq);
+
+ return xmit;
+}
+
+static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ dma_addr_t dma;
+
+ entry = &tx->entry[tx->write];
+ entry->zc = true;
+
+ dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
+ xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
+
+ entry->type = TSNEP_TX_TYPE_XSK;
+ entry->len = xdpd->len;
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ return xdpd->len;
+}
+
+static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
+ struct tsnep_tx *tx)
+{
+ int length;
+
+ length = tsnep_xdp_tx_map_zc(xdpd, tx);
+
+ tsnep_tx_activate(tx, tx->write, length, true);
+ tx->write = (tx->write + 1) & TSNEP_RING_MASK;
+}
+
+static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
+{
+ int desc_available = tsnep_tx_desc_available(tx);
+ struct xdp_desc *descs = tx->xsk_pool->tx_descs;
+ int batch, i;
+
+ /* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+ * descriptors are always kept available for the normal TX path, which
+ * stops the queue itself if necessary
+ */
+ if (desc_available <= (MAX_SKB_FRAGS + 1))
+ return;
+ desc_available -= MAX_SKB_FRAGS + 1;
+
+ batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
+ for (i = 0; i < batch; i++)
+ tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
+
+ if (batch) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ tsnep_xdp_xmit_flush(tx);
+ }
+}
+
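+/* TX completion uses a fixed local budget of 128 descriptors per poll; the
+ * NAPI budget applies to RX only, so napi_budget is merely forwarded to
+ * napi_consume_skb().
+ */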
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ int xsk_frames = 0;
+ int budget = 128;
+ int length;
+ int count;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ count = 1;
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+ else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
+ xdp_frame_has_frags(entry->xdpf))
+ count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
+
+ length = tsnep_tx_unmap(tx, tx->read, count);
+
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp;
+
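+ /* if the socket is bound to a PHC, report the free running
+ * counter instead of the absolute timestamp, so that user space
+ * can relate it to its bound clock; tsnep_netdev_get_tstamp()
+ * makes the same distinction for RX via the cycles flag
+ */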
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->counter);
+ else
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ napi_consume_skb(entry->skb, napi_budget);
+ else if (entry->type & TSNEP_TX_TYPE_XDP)
+ xdp_return_frame_rx_napi(entry->xdpf);
+ else
+ xsk_frames++;
+ /* xdpf and zc are union with skb */
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) & TSNEP_RING_MASK;
+
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
+ budget--;
+ } while (likely(budget));
+
+ if (tx->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ tsnep_xdp_xmit_zc(tx);
+ }
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_tx_queue_stopped(nq)) {
+ netif_tx_wake_queue(nq);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return budget != 0;
+}
+
+static bool tsnep_tx_pending(struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ bool pending = false;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (tx->read != tx->write) {
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) ==
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ pending = true;
+ }
+
+ __netif_tx_unlock(nq);
+
+ return pending;
+}
+
+static int tsnep_tx_open(struct tsnep_tx *tx)
+{
+ int retval;
+
+ retval = tsnep_tx_ring_create(tx);
+ if (retval)
+ return retval;
+
+ tsnep_tx_init(tx);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (!rx->xsk_pool && entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ if (rx->xsk_pool && entry->xdp)
+ xsk_buff_free(entry->xdp);
+ /* xdp is union with page */
+ entry->page = NULL;
+ }
+
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_rx_ring_create(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_RX_OFFSET;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_init(struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->write = 0;
+ rx->read = 0;
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_rx_enable(struct tsnep_rx *rx)
+{
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+}
+
+static void tsnep_rx_disable(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+}
+
+static int tsnep_rx_desc_available(struct tsnep_rx *rx)
+{
+ if (rx->read <= rx->write)
+ return TSNEP_RING_SIZE - rx->write + rx->read - 1;
+ else
+ return rx->read - rx->write - 1;
+}
+
+static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
+{
+ struct page **page;
+
+ /* last entry of page_buffer is always zero, because ring cannot be
+ * filled completely
+ */
+ page = rx->page_buffer;
+ while (*page) {
+ page_pool_put_full_page(rx->page_pool, *page, false);
+ *page = NULL;
+ page++;
+ }
+}
+
+static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
+{
+ int i;
+
+ /* alloc for all ring entries except the last one, because ring cannot
+ * be filled completely
+ */
+ for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
+ rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
+ if (!rx->page_buffer[i]) {
+ tsnep_rx_free_page_buffer(rx);
+
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct page *page)
+{
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
+}
+
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+ tsnep_rx_set_page(rx, entry, page);
+
+ return 0;
+}
+
+static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_page(rx, entry, read->page);
+ read->page = NULL;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiple of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ bool alloc_failed = false;
+ int i, index;
+
+ for (i = 0; i < count && !alloc_failed; i++) {
+ index = (rx->write + i) & TSNEP_RING_MASK;
+
+ if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
+ rx->alloc_failed++;
+ alloc_failed = true;
+
+ /* reuse only if no other allocation was successful */
+ if (i == 0 && reuse)
+ tsnep_rx_reuse_buffer(rx, index);
+ else
+ break;
+ }
+
+ tsnep_rx_activate(rx, index);
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct xdp_buff *xdp)
+{
+ entry->xdp = xdp;
+ entry->len = TSNEP_XSK_RX_BUF_SIZE;
+ entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
+ entry->desc->rx = __cpu_to_le64(entry->dma);
+}
+
+static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_xdp(rx, entry, read->xdp);
+ read->xdp = NULL;
+}
+
+static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ u32 allocated;
+ int i;
+
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
+ for (i = 0; i < allocated; i++) {
+ int index = (rx->write + i) & TSNEP_RING_MASK;
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
+ tsnep_rx_activate(rx, index);
+ }
+ if (i == 0) {
+ rx->alloc_failed++;
+
+ if (reuse) {
+ tsnep_rx_reuse_buffer_zc(rx, rx->write);
+ tsnep_rx_activate(rx, rx->write);
+ }
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static void tsnep_rx_free_zc(struct tsnep_rx *rx)
+{
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ if (entry->xdp)
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+}
+
+static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
+{
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+}
+
+static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ unsigned int length;
+ unsigned int sync;
+ u32 act;
+
+ length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ /* due to xdp_adjust_tail, the DMA sync for_device has to cover
+ * the maximum length touched by the CPU
+ */
+ sync = xdp->data_end - xdp->data_hard_start -
+ XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+ page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+ sync, true);
+ return true;
+ }
+}
+
+static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq,
+ struct tsnep_tx *tx)
+{
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* XDP_REDIRECT is the main action for zero-copy */
+ if (likely(act == XDP_REDIRECT)) {
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xsk_buff_free(xdp);
+ return true;
+ }
+}
+
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ if (status & TSNEP_XDP_TX) {
+ __netif_tx_lock(tx_nq, smp_processor_id());
+ tsnep_xdp_xmit_flush(tx);
+ __netif_tx_unlock(tx_nq);
+ }
+
+ if (status & TSNEP_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_RX_OFFSET);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
+static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
+ struct page *page, int length)
+{
+ struct sk_buff *skb;
+
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ skb_mark_for_recycle(skb);
+
+ rx->packets++;
+ rx->bytes += length;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
+ }
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ enum dma_data_direction dma_dir;
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill(rx, desc_available,
+ reuse);
+ if (!entry->page) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma,
+ TSNEP_RX_OFFSET, length, dma_dir);
+
+ /* RX metadata with timestamps is placed in front of the actual
+ * data; subtract the metadata size to get the length of the
+ * actual data and treat the metadata size as the offset of the
+ * actual data during RX processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+ length - ETH_FCS_LEN, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->page = NULL;
+
+ continue;
+ }
+ }
+
+ tsnep_rx_page(rx, napi, entry->page, length);
+ entry->page = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ tsnep_rx_refill(rx, desc_available, false);
+
+ return done;
+}
+
+static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ struct page *page;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available,
+ reuse);
+ if (!entry->xdp) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
+ xsk_buff_dma_sync_for_cpu(entry->xdp);
+
+ /* RX metadata with timestamps is placed in front of the actual
+ * data; subtract the metadata size to get the length of the
+ * actual data and treat the metadata size as the offset of the
+ * actual data during RX processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
+ entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
+
+ consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->xdp = NULL;
+
+ continue;
+ }
+ }
+
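+ /* frame is handed to the network stack: copy it, including the
+ * inline metadata, into a page pool page, because the XSK buffer
+ * itself has to be returned to the XSK pool
+ */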
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (page) {
+ memcpy(page_address(page) + TSNEP_RX_OFFSET,
+ entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
+ length + TSNEP_RX_INLINE_METADATA_SIZE);
+ tsnep_rx_page(rx, napi, page, length);
+ } else {
+ rx->dropped++;
+ }
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
+
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ tsnep_xsk_rx_need_wakeup(rx, desc_available);
+
+ return done;
+ }
+
+ return desc_available ? budget : done;
+}
+
+static bool tsnep_rx_pending(struct tsnep_rx *rx)
+{
+ struct tsnep_rx_entry *entry;
+
+ if (rx->read != rx->write) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) ==
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ return true;
+ }
+
+ return false;
+}
+
+static int tsnep_rx_open(struct tsnep_rx *rx)
+{
+ int desc_available;
+ int retval;
+
+ retval = tsnep_rx_ring_create(rx);
+ if (retval)
+ return retval;
+
+ tsnep_rx_init(rx);
+
+ desc_available = tsnep_rx_desc_available(rx);
+ if (rx->xsk_pool)
+ retval = tsnep_rx_alloc_zc(rx, desc_available, false);
+ else
+ retval = tsnep_rx_alloc(rx, desc_available, false);
+ if (retval != desc_available) {
+ retval = -ENOMEM;
+
+ goto alloc_failed;
+ }
+
+ /* prealloc pages to prevent allocation failures when XSK pool is
+ * disabled at runtime
+ */
+ if (rx->xsk_pool) {
+ retval = tsnep_rx_alloc_page_buffer(rx);
+ if (retval)
+ goto alloc_failed;
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ if (rx->xsk_pool)
+ tsnep_rx_free_page_buffer(rx);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static void tsnep_rx_reopen(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ /* prevent allocation failures by reusing kept pages */
+ if (*page) {
+ tsnep_rx_set_page(rx, entry, *page);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ *page = NULL;
+ page++;
+ }
+ }
+}
+
+static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ u32 allocated;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ /* alloc all ring entries except the last one, because the ring cannot
+ * be filled completely; as many buffers as possible are sufficient,
+ * because a wakeup is signaled as soon as new buffers are available
+ */
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
+ TSNEP_RING_SIZE - 1);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* keep pages to prevent allocation failures when xsk is
+ * disabled
+ */
+ if (entry->page) {
+ *page = entry->page;
+ entry->page = NULL;
+
+ page++;
+ }
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ if (allocated) {
+ tsnep_rx_set_xdp(rx, entry,
+ rx->xdp_batch[allocated - 1]);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ allocated--;
+ }
+ }
+
+ /* set the need wakeup flag immediately if the ring is not filled
+ * completely; waiting for the first polling would be too late, as the
+ * need wakeup signaling would be delayed indefinitely
+ */
+ if (xsk_uses_need_wakeup(rx->xsk_pool))
+ tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
+}
+
+static bool tsnep_pending(struct tsnep_queue *queue)
+{
+ if (queue->tx && tsnep_tx_pending(queue->tx))
+ return true;
+
+ if (queue->rx && tsnep_rx_pending(queue->rx))
+ return true;
+
+ return false;
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ /* handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(budget <= 0))
+ return budget;
+
+ if (queue->rx) {
+ done = queue->rx->xsk_pool ?
+ tsnep_rx_poll_zc(queue->rx, napi, budget) :
+ tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if all work not completed, return budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done))) {
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
+ /* reschedule if work is already pending to prevent stale packets
+ * which were transmitted or received after polling but before the
+ * interrupt was enabled
+ */
+ if (tsnep_pending(queue)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(napi);
+ }
+ }
+
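+ /* napi_complete_done() has been called, so strictly less than the
+ * budget shall be returned; min() keeps this invariant on every path
+ */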
+ return min(done, budget - 1);
+}
+
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
+ name, queue->rx->queue_index);
+ else if (queue->tx)
+ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
+ name, queue->tx->queue_index);
+ else
+ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
+ name, queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
+static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+
+ tsnep_free_irq(queue, first);
+
+ if (rx) {
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
+ xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
+ }
+
+ netif_napi_del(&queue->napi);
+}
+
+static int tsnep_queue_open(struct tsnep_adapter *adapter,
+ struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+ struct tsnep_tx *tx = queue->tx;
+ int retval;
+
+ netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
+
+ if (rx) {
+ /* choose TX queue for XDP_TX */
+ if (tx)
+ rx->tx_queue_index = tx->queue_index;
+ else if (rx->queue_index < adapter->num_tx_queues)
+ rx->tx_queue_index = rx->queue_index;
+ else
+ rx->tx_queue_index = 0;
+
+ /* prepare both memory models to eliminate possible registration
+ * errors when memory model is switched between page pool and
+ * XSK pool during runtime
+ */
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (retval)
+ goto failed;
+ if (rx->xsk_pool)
+ xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
+ }
+
+ retval = tsnep_request_irq(queue, first);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n", queue->irq);
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_queue_close(queue, first);
+
+ return retval;
+}
+
+static void tsnep_queue_enable(struct tsnep_queue *queue)
+{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
+ napi_enable(&queue->napi);
+ tsnep_enable_irq(adapter, queue->irq_mask);
+
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
+ tsnep_tx_enable(queue->tx);
+ }
+
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
+ tsnep_rx_enable(queue->rx);
+ }
+}
+
+static void tsnep_queue_disable(struct tsnep_queue *queue)
+{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
+ tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ napi_disable(&queue->napi);
+ tsnep_disable_irq(adapter, queue->irq_mask);
+
+ /* disable RX after NAPI polling has been disabled, because RX can be
+ * enabled during NAPI polling
+ */
+ if (queue->rx)
+ tsnep_rx_disable(queue->rx);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i, retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ retval = tsnep_tx_open(adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ }
+ if (adapter->queue[i].rx) {
+ retval = tsnep_rx_open(adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ }
+
+ retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
+ if (retval)
+ goto failed;
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ tsnep_queue_enable(&adapter->queue[i]);
+
+ return 0;
+
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_disable(&adapter->queue[i]);
+
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ return 0;
+}
+
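+/* Enable XSK at runtime: the buffer arrays needed for a later fallback to the
+ * page pool are allocated up front, the queue is disabled only briefly while
+ * the pools are swapped, and the RX ring is reopened backed by the XSK pool.
+ */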
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
+{
+ bool running = netif_running(queue->adapter->netdev);
+ u32 frame_size;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
+ return -EOPNOTSUPP;
+
+ queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->page_buffer),
+ GFP_KERNEL);
+ if (!queue->rx->page_buffer)
+ return -ENOMEM;
+ queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->xdp_batch),
+ GFP_KERNEL);
+ if (!queue->rx->xdp_batch) {
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+
+ return -ENOMEM;
+ }
+
+ xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ queue->tx->xsk_pool = pool;
+ queue->rx->xsk_pool = pool;
+
+ if (running) {
+ tsnep_rx_reopen_xsk(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ return 0;
+}
+
+void tsnep_disable_xsk(struct tsnep_queue *queue)
+{
+ bool running = netif_running(queue->adapter->netdev);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ tsnep_rx_free_zc(queue->rx);
+
+ queue->rx->xsk_pool = NULL;
+ queue->tx->xsk_pool = NULL;
+
+ if (running) {
+ tsnep_rx_reopen(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ kfree(queue->rx->xdp_batch);
+ queue->rx->xdp_batch = NULL;
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
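The statistics reads above extract fields with open-coded (reg & MASK) >> SHIFT pairs. For contiguous, compile-time-constant masks this is equivalent to the FIELD_GET() helper from <linux/bitfield.h>; a sketch of the same extraction, shown only for comparison:

	#include <linux/bitfield.h>

	/* same value as (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
	 * TSNEP_RX_STATISTIC_NO_DESC_SHIFT, provided the mask is contiguous
	 */
	val = FIELD_GET(TSNEP_RX_STATISTIC_NO_DESC_MASK, reg);
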
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
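For illustration, tsnep_mac_set_address() above splits the 6-byte address into one 32-bit and one 16-bit register write; a byte-level sketch, assuming the device expects the address in memory byte order (a property of this hardware, taken as given here):

	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 }; /* example address */
	u32 low  = *(u32 *)mac;       /* mac[0]..mac[3] -> TSNEP_MAC_ADDRESS_LOW */
	u16 high = *(u16 *)(mac + 4); /* mac[4]..mac[5] -> TSNEP_MAC_ADDRESS_HIGH */
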
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
+static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
+ u64 timestamp;
+
+ if (cycles)
+ timestamp = __le64_to_cpu(rx_inline->counter);
+ else
+ timestamp = __le64_to_cpu(rx_inline->timestamp);
+
+ return ns_to_ktime(timestamp);
+}
+
+static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
+ if (cpu >= TSNEP_MAX_QUEUES)
+ cpu &= TSNEP_MAX_QUEUES - 1;
+
+ while (cpu >= adapter->num_tx_queues)
+ cpu -= adapter->num_tx_queues;
+
+ return &adapter->tx[cpu];
+}
+
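A worked example of the reduction above, with illustrative values of TSNEP_MAX_QUEUES = 4 and num_tx_queues = 3: CPU 6 is masked to 6 & 3 = 2, which is below the queue count, so it maps to TX queue 2; CPU 7 masks to 3, which the loop reduces to 3 - 3 = 0, so it maps to TX queue 0. The mask step relies on TSNEP_MAX_QUEUES being a power of two; the subtraction loop covers queue counts that are not.
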
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **xdp, u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ u32 cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct tsnep_tx *tx;
+ int nxmit;
+ bool xmit;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ tx = tsnep_xdp_get_tx(adapter, cpu);
+ nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+ TSNEP_TX_TYPE_XDP_NDO);
+ if (!xmit)
+ break;
+
+ /* avoid transmit queue timeout since we share it with the slow
+ * path
+ */
+ txq_trans_cond_update(nq);
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ tsnep_xdp_xmit_flush(tx);
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
+ u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
+ if (!napi_if_scheduled_mark_missed(&queue->napi))
+ napi_schedule(&queue->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
+ .ndo_get_tstamp = tsnep_netdev_get_tstamp,
+ .ndo_setup_tc = tsnep_tc_setup,
+ .ndo_bpf = tsnep_netdev_bpf,
+ .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
+ .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
+ .ndo_hwtstamp_get = tsnep_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = tsnep_ptp_hwtstamp_set,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+	/* initialize RX filtering; at minimum, the configured MAC address and
+	 * broadcasts are not filtered
+	 */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan broadcast address */
+	adapter->mdiobus->phy_mask = 0x00000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].adapter = adapter;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].tx->adapter = adapter;
+ adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].tx->queue_index = 0;
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].rx->adapter = adapter;
+ adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].rx->queue_index = 0;
+ adapter->queue[0].irq_mask = irq_mask;
+ adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[0],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].adapter = adapter;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].tx->adapter = adapter;
+ adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].tx->queue_index = i;
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].rx->adapter = adapter;
+ adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].rx->queue_index = i;
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ adapter->queue[i].irq_delay_addr =
+ adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
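For reference, tsnep_queue_init() above looks up either a single unnamed interrupt or a "mac" interrupt plus optional "txrx-<i>" interrupts, one per additional queue pair. A device tree sketch consistent with that lookup (addresses and interrupt specifiers are invented; the engleder,tsnep binding document is authoritative):

	ethernet@a0000000 {
		compatible = "engleder,tsnep";
		reg = <0xa0000000 0x10000>;
		interrupts = <0 89 4>, <0 90 4>, <0 91 4>, <0 92 4>;
		interrupt-names = "mac", "txrx-1", "txrx-2", "txrx-3";
	};
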
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int queue_count;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
+
+ adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
+ return retval;
+ }
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ return retval;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+ return retval;
+}
+
+static void tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_rxnfc_cleanup(adapter);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = tsnep_of_match,
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 000000000000..ae1308eb813d
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ *time = (((u64)high) << 32) | ((u64)low);
+}
+
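The retry loop above is the usual pattern for a 64-bit time split across two 32-bit registers: if the counter carries from 0x00000001FFFFFFFF to 0x0000000200000000 between the first high read and the low read, a single pass would pair high = 1 with low = 0x00000000 and report a time about 4.29 s (2^32 ns) in the past; the second high read returns 2 != 1, so the loop retries and obtains a consistent pair.
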
+int tsnep_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->hwtstamp_config;
+ return 0;
+}
+
+int tsnep_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ adapter->hwtstamp_config = *config;
+ return 0;
+}
+
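Note that every PTP rx_filter is upgraded to HWTSTAMP_FILTER_ALL above, so requesting any PTP filter timestamps all received frames. A minimal userspace sketch using the standard hardware timestamping ioctl (the interface name is a placeholder; fd is any socket on the device):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hwtstamp(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* on success this driver reports back HWTSTAMP_FILTER_ALL */
		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}
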
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+	/* convert from 16 bit to 32 bit binary fractional, divide by 1000000 to
+	 * eliminate ppm, multiply by 8 to compensate for the 8ns clock cycle
+	 * time; the calculation simplifies because 15625 * 8 = 1000000 / 8
+	 */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
+
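A worked check of the fixed-point conversion above: scaled_ppm carries ppm as a 16-bit binary fraction, and the shift factors imply a register unit of 2^-32 ns per 8 ns clock cycle (taken as an assumption here), so

	rate_offset = (scaled_ppm / 2^16) * 10^-6 * 8 * 2^32
	            = scaled_ppm * 2^19 / 10^6
	            = scaled_ppm * 2^13 / 15625       (since 10^6 = 15625 * 2^6)

which is exactly the shift by 16 - 3 followed by div_u64(..., 15625). For 1 ppm (scaled_ppm = 65536) this yields 536870912 / 15625 = 34359 after integer division, i.e. about 8 * 10^-6 ns added per 8 ns cycle, as expected.
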
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 counter;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_COUNTER_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ } while (high != high_before);
+ counter = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(counter);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+	/* at most 2^-1 ns adjustment per clock cycle with 8 ns clock cycle
+	 * time; stay slightly below that because only bits below 2^-1 ns are
+	 * supported
+	 */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+ adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
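tsnep_add_rule() keeps the rule list sorted by location; that invariant lets tsnep_get_rule() stop scanning early and lets tsnep_rxnfc_find_location() below find the first free slot in a single pass. A self-contained sketch of the same first-gap scan over a sorted array (illustrative only, not driver code):

	/* return the first location not present in sorted[], or -1 if full */
	static int first_free_location(const int *sorted, int count, int max)
	{
		int location = 0;
		int i;

		for (i = 0; i < count; i++) {
			if (sorted[i] == location)
				location++;
			else
				break;	/* first gap in the sorted sequence */
		}

		return location < max ? location : -1;
	}
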
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 000000000000..8a9145f93147
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+	/* for a start time after the timeout, the timeout guarantees that
+	 * enable is blocked if it happens too late
+	 */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
+
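In effect, delay_base_time() advances base_time to the first cycle boundary strictly after system_time + ms: with n = floor((system_time + ms * 10^6 - base_time) / cycle_time), the new base time is base_time + (n + 1) * cycle_time, which guarantees the schedule under test always starts in the future relative to the current hardware time.
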
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.cmd = TAPRIO_CMD_DESTROY;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->cmd = TAPRIO_CMD_REPLACE;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 000000000000..745b191a5540
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* reserve one operation at the end for the additional operation at list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
+
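As a concrete check of the constraints above, the first schedule in tsnep_selftests.c uses intervals 200000 + 800000 + 240000 + 80000 + 70000 + 60000 + 50000 = 1500000 ns, which matches its cycle_time of 1500000 exactly; any mismatch between the interval sum and cycle_time is rejected here with -EINVAL.
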
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+ /* change needs to be triggered one or two operations before start of
+ * new gate control list
+ * - change is triggered at start of operation (minimum one operation)
+ * - operation with adjusted interval is inserted on demand to exactly
+ * meet the start of the new gate control list (optional)
+ *
+ * additionally properties are read directly after start of previous
+ * operation
+ *
+	 * therefore, three operations need to be considered for the limit
+ */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
+
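A worked example of the three-operation window: for a gate control list with count = 7 and index = 6, the loop sums interval[6] + interval[5] + interval[4]; with the first selftest schedule that is 50000 + 60000 + 70000 = 180000 ns of lead time to budget between triggering a change and the start of the new list.
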
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+ /* use maximum, because the actual case (extend or cut) can be
+ * determined only after limit is known (chicken-and-egg problem)
+ */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
+
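Both helpers are plain cycle arithmetic; with n = floor((limit - base_time) / cycle_time):

	start_after(limit)  = base_time + (n + 1) * cycle_time
	start_before(limit) = base_time + n * cycle_time, stepped back one
	                      cycle_time if that lands exactly on limit

For base_time = 0, cycle_time = 100000 and limit = 250000, start_after() returns 300000 and start_before() returns 200000. (start_before() assumes limit is past base_time; start_after() returns base_time unchanged when it is already beyond limit.)
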
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+ /* optionally change to new list with additional operation in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+ /* last operation of list is reserved to insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+	/* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+ /* estimate the timeout limit after timeout enable; the actual timeout
+ * limit in hardware will be earlier than this estimate, so we are on
+ * the safe side
+ */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+ /* gate control time register is only 32 bits wide => start time must be
+ * in the near future (no driver support for the far future implemented)
+ */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (qopt->cmd == TAPRIO_CMD_DESTROY) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ } else if (qopt->cmd != TAPRIO_CMD_REPLACE) {
+ return -EOPNOTSUPP;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+ /* start the timeout which discards a late enable; this helps ensure
+ * that the start/change time is still in the future at enable
+ */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
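The start-time arithmetic in tsnep_gcl_start_after() and tsnep_gcl_start_before() above is easy to get wrong by one cycle at the boundary where the limit coincides with a cycle start: "after" must exclude the limit itself, and "before" must do the same. A minimal userspace sketch of the same computation (hypothetical helper names; the kernel code uses div64_u64() so the division also works on 32-bit hosts):

#include <stdint.h>
#include <assert.h>

/* First cycle start strictly after 'limit' (mirrors tsnep_gcl_start_after). */
static uint64_t first_start_after(uint64_t base, uint64_t cycle, uint64_t limit)
{
	uint64_t start = base;

	if (start <= limit)
		start += ((limit - start) / cycle + 1) * cycle;

	return start;
}

/* Last cycle start strictly before 'limit' (mirrors tsnep_gcl_start_before). */
static uint64_t last_start_before(uint64_t base, uint64_t cycle, uint64_t limit)
{
	uint64_t start = base + (limit - base) / cycle * cycle;

	if (start == limit)
		start -= cycle;

	return start;
}

int main(void)
{
	/* base 1000, cycle 300: starts at 1000, 1300, 1600, ... */
	assert(first_start_after(1000, 300, 1500) == 1600);
	assert(first_start_after(1000, 300, 1600) == 1900); /* limit excluded */
	assert(last_start_before(1000, 300, 1600) == 1300); /* limit excluded */
	return 0;
}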
diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c
new file mode 100644
index 000000000000..c0513848c547
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include <linux/if_vlan.h>
+#include <net/xdp_sock_drv.h>
+
+#include "tsnep.h"
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct tsnep_queue *queue;
+ int retval;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+ if (queue->rx->queue_index != queue_id ||
+ queue->tx->queue_index != queue_id) {
+ netdev_err(adapter->netdev,
+ "XSK support only for TX/RX queue pairs\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ retval = xsk_pool_dma_map(pool, adapter->dmadev,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (retval) {
+ netdev_err(adapter->netdev, "failed to map XSK pool\n");
+
+ return retval;
+ }
+
+ retval = tsnep_enable_xsk(queue, pool);
+ if (retval) {
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+ if (!pool)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
+ tsnep_disable_xsk(queue);
+
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return 0;
+}
+
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
+ tsnep_xdp_disable_pool(adapter, queue_id);
+}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index b1c8ffea6ad2..0c418557264c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static void ethoc_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev,
}
static int ethoc_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -1074,14 +1078,11 @@ static int ethoc_probe(struct platform_device *pdev)
/* obtain device IRQ number */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto free;
- }
- netdev->irq = res->start;
+ netdev->irq = ret;
/* setup driver-private data */
priv = netdev_priv(netdev);
@@ -1223,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
@@ -1253,7 +1254,7 @@ out:
* ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
-static int ethoc_remove(struct platform_device *pdev)
+static void ethoc_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ethoc *priv = netdev_priv(netdev);
@@ -1270,8 +1271,6 @@ static int ethoc_remove(struct platform_device *pdev)
unregister_netdev(netdev);
free_netdev(netdev);
}
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1297,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove = ethoc_remove,
+ .remove = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
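The IRQ-lookup conversion in ethoc_probe() above follows a tree-wide pattern: platform_get_irq() already logs a useful error itself (except for probe deferral) and returns a negative errno, so the open-coded IORESOURCE_IRQ lookup plus dev_err() becomes redundant. A hedged sketch of the resulting probe idiom (generic names, not ethoc's):

static int example_probe(struct platform_device *pdev)
{
	int irq;

	/* platform_get_irq() prints its own error message on failure
	 * (other than -EPROBE_DEFER), so just propagate the errno.
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* ... pass 'irq' to request_irq()/devm_request_irq() ... */
	return 0;
}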
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 323340826dab..5cb478e98697 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -6,10 +6,9 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include "nps_enet.h"
#define DRV_NAME "nps_mgt_enet"
@@ -199,7 +198,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
*/
if (nps_enet_is_tx_pending(priv)) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
- napi_reschedule(napi);
+ napi_schedule(napi);
}
}
@@ -608,13 +607,12 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
}
- netif_napi_add(ndev, &priv->napi, nps_enet_poll,
- NPS_ENET_NAPI_POLL_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, nps_enet_poll,
+ NPS_ENET_NAPI_POLL_WEIGHT);
/* Register the driver. Should be the last thing in probe */
err = register_netdev(ndev);
@@ -635,7 +633,7 @@ out_netdev:
return err;
}
-static s32 nps_enet_remove(struct platform_device *pdev)
+static void nps_enet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nps_enet_priv *priv = netdev_priv(ndev);
@@ -643,8 +641,6 @@ static s32 nps_enet_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id nps_enet_dt_ids[] = {
@@ -665,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");
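Two NAPI-registration conversions recur in this series: drivers that used the default weight simply drop the argument, since netif_napi_add() now implies NAPI_POLL_WEIGHT (64), while drivers with a non-default weight, like nps_enet above, switch to the explicit variant. In sketch form (example_poll is illustrative):

/* default weight (64) — the old trailing '64' argument is dropped */
netif_napi_add(netdev, &priv->napi, example_poll);

/* non-default weight — kept via the explicit variant */
netif_napi_add_weight(netdev, &priv->napi, example_poll,
		      NPS_ENET_NAPI_POLL_WEIGHT);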
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 3d1e9a302148..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_FARADAY
bool "Faraday devices"
default y
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,24 +19,23 @@ if NET_VENDOR_FARADAY
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select MII
help
This driver supports the FTMAC100 10/100 Ethernet controller
- from Faraday. It is used on Faraday A320, Andes AG101 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A320 and some other ARM SoCs.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
- from Faraday. It is used on Faraday A369, Andes AG102 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A369 and some other ARM SoCs.
endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 97c5d70de76e..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -24,6 +25,7 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
+#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -50,6 +52,15 @@
#define FTGMAC_100MHZ 100000000
#define FTGMAC_25MHZ 25000000
+/* For NC-SI to register a fixed-link phy device */
+static struct fixed_phy_status ncsi_phy_status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .pause = 0,
+ .asym_pause = 0
+};
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -91,6 +102,8 @@ struct ftgmac100 {
/* AST2500/AST2600 RMII ref clock gate */
struct clk *rclk;
+ /* Aspeed reset control */
+ struct reset_control *rst;
/* Link management */
int cur_speed;
@@ -138,6 +151,23 @@ static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
{
u32 maccr = 0;
+ /* Aspeed RMII needs SCU reset to clear status */
+ if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
+ return err;
+ }
+ usleep_range(10000, 20000);
+ err = reset_control_deassert(priv->rst);
+ if (err) {
+ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
+ return err;
+ }
+ }
+
switch (priv->cur_speed) {
case SPEED_10:
case 0: /* no link */
@@ -177,16 +207,20 @@ static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
-static void ftgmac100_initial_mac(struct ftgmac100 *priv)
+static int ftgmac100_initial_mac(struct ftgmac100 *priv)
{
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
+ int err;
- if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
+ err = of_get_ethdev_address(priv->dev->of_node, priv->netdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (!err) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
priv->netdev->dev_addr);
- return;
+ return 0;
}
m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
@@ -207,6 +241,8 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
dev_info(priv->dev, "Generated random MAC address %pM\n",
priv->netdev->dev_addr);
}
+
+ return 0;
}
static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
@@ -566,7 +602,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
(*processed)++;
return true;
- drop:
+drop:
/* Clean rxdes0 (which resets own bit) */
rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -650,6 +686,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
return true;
@@ -803,6 +844,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
/* Update next TX pointer */
priv->tx_pointer = pointer;
@@ -823,7 +869,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
- dma_err:
+dma_err:
if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
@@ -845,7 +891,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
* last fragment, so we know ftgmac100_free_tx_packet()
* hasn't freed the skb yet.
*/
- drop:
+drop:
/* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
@@ -989,117 +1035,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1174,12 +1109,15 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static void ftgmac100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1190,8 +1128,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev,
ering->tx_pending = priv->tx_q_entries;
}
-static int ftgmac100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1404,10 +1345,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1445,7 +1384,7 @@ static void ftgmac100_reset_task(struct work_struct *work)
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock);
if (netdev->phydev)
@@ -1453,6 +1392,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Release the phy lock to allow ftgmac100_reset() to acquire it,
+ * keeping the lock order consistent to prevent deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1485,7 +1552,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1504,7 +1571,8 @@ static int ftgmac100_open(struct net_device *netdev)
if (netdev->phydev) {
/* If we have a PHY, start polling */
phy_start(netdev->phydev);
- } else if (priv->use_ncsi) {
+ }
+ if (priv->use_ncsi) {
/* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
@@ -1516,15 +1584,16 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
- err_ncsi:
+err_ncsi:
+ phy_stop(netdev->phydev);
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
ftgmac100_free_buffers(priv);
free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
ftgmac100_free_rings(priv);
return err;
@@ -1550,7 +1619,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
- else if (priv->use_ncsi)
+ if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
@@ -1680,10 +1749,18 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
- if (!netdev->phydev)
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
+
+ if (priv->use_ncsi)
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1743,11 +1820,25 @@ cleanup_clk:
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
int irq;
struct net_device *netdev;
+ struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
@@ -1805,7 +1896,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->aneg_pause = true;
/* MAC address from chip or random one */
- ftgmac100_initial_mac(priv);
+ err = ftgmac100_initial_mac(priv);
+ if (err)
+ goto err_phy_connect;
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
@@ -1814,11 +1907,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1838,15 +1926,30 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
- } else if (np && of_get_property(np, "phy-handle", NULL)) {
+
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
+ if (IS_ERR(phydev)) {
+ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
+ err = PTR_ERR(phydev);
+ goto err_phy_connect;
+ }
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_RMII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_phy_connect;
+ }
+ } else if (np && (of_phy_is_fixed_link(np) ||
+ of_get_property(np, "phy-handle", NULL))) {
struct phy_device *phy;
/* Support "mdio"/"phy" child nodes for ast2400/2500 with
* an embedded MDIO controller. Automatically scan the DTS for
* available PHYs and register them.
*/
- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ if (of_get_property(np, "phy-handle", NULL) &&
+ (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
err = ftgmac100_setup_mdio(netdev);
if (err)
goto err_setup_mdio;
@@ -1867,7 +1970,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that describe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
@@ -1886,10 +1989,21 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
+ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ err = PTR_ERR(priv->rst);
+ goto err_phy_connect;
+ }
+
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
@@ -1907,6 +2021,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
@@ -1941,7 +2060,7 @@ err_alloc_etherdev:
return err;
}
-static int ftgmac100_remove(struct platform_device *pdev)
+static void ftgmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftgmac100 *priv;
@@ -1969,7 +2088,6 @@ static int ftgmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftgmac100_of_match[] = {
@@ -1980,7 +2098,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
- .remove = ftgmac100_remove,
+ .remove = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,
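The smp_wmb() barriers added to ftgmac100's transmit paths above enforce the usual publication ordering for ring indices: the descriptor words must be globally visible before the pointer that makes them reachable moves past them. In generic, hedged form (illustrative names; the reader side pairs with smp_rmb() or an equivalent ordering guarantee):

/* producer side: fill the descriptor, then publish the new index */
desc->ctl = cpu_to_le32(ctl_stat);	/* descriptor contents */
smp_wmb();				/* contents visible before index */
WRITE_ONCE(ring->head, next);		/* publish */

/* consumer side: read the index, then the descriptors it covers */
head = READ_ONCE(ring->head);
smp_rmb();				/* pairs with the smp_wmb() above */
status = le32_to_cpu(desc->ctl);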
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
FTGMAC100_INT_RPKT_BUF)
/* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
FTGMAC100_INT_BAD)
/*
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..5803a382f0ba 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -11,6 +11,8 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -27,8 +29,8 @@
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
-#define MAX_PKT_SIZE 1518
#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
+#define MAX_PKT_SIZE RX_BUF_SIZE /* multi-segment not supported */
#if MAX_PKT_SIZE > 0x7ff
#error invalid MAX_PKT_SIZE
@@ -147,6 +149,40 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
}
+static void ftmac100_setup_mc_ht(struct ftmac100 *priv)
+{
+ struct netdev_hw_addr *ha;
+ u64 maht = 0; /* Multicast Address Hash Table */
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 hash = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ maht |= BIT_ULL(hash);
+ }
+
+ iowrite32(lower_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT0);
+ iowrite32(upper_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT1);
+}
+
+static void ftmac100_set_rx_bits(struct ftmac100 *priv, unsigned int *maccr)
+{
+ struct net_device *netdev = priv->netdev;
+
+ /* Clear all */
+ *maccr &= ~(FTMAC100_MACCR_RCV_ALL | FTMAC100_MACCR_RX_MULTIPKT |
+ FTMAC100_MACCR_HT_MULTI_EN);
+
+ /* Set the requested bits */
+ if (netdev->flags & IFF_PROMISC)
+ *maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ *maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+ else if (netdev_mc_count(netdev)) {
+ *maccr |= FTMAC100_MACCR_HT_MULTI_EN;
+ ftmac100_setup_mc_ht(priv);
+ }
+}
+
#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
FTMAC100_MACCR_RCV_EN | \
FTMAC100_MACCR_XDMA_EN | \
@@ -159,6 +195,7 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
static int ftmac100_start_hw(struct ftmac100 *priv)
{
struct net_device *netdev = priv->netdev;
+ unsigned int maccr = MACCR_ENABLE_ALL;
if (ftmac100_reset(priv))
return -EIO;
@@ -175,7 +212,13 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
ftmac100_set_mac(priv, netdev->dev_addr);
- iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+ /* See ftmac100_change_mtu() */
+ if (netdev->mtu > ETH_DATA_LEN)
+ maccr |= FTMAC100_MACCR_RX_FTL;
+
+ ftmac100_set_rx_bits(priv, &maccr);
+
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
}
@@ -218,11 +261,6 @@ static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
}
-static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
-}
-
static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
@@ -337,13 +375,7 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
error = true;
}
- if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx frame too long\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+ if (unlikely(ftmac100_rxdes_runt(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx runt\n");
@@ -356,6 +388,11 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
netdev->stats.rx_length_errors++;
error = true;
}
+ /*
+ * FTMAC100_RXDES0_FTL is not an error; it just indicates that the
+ * frame is longer than 1518 octets. Receiving these is possible when
+ * we told the hardware not to drop them, via FTMAC100_MACCR_RX_FTL.
+ */
return error;
}
@@ -400,12 +437,13 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
return true;
}
- /*
- * It is impossible to get multi-segment packets
- * because we always provide big enough receive buffers.
- */
+ /* We don't support multi-segment packets for now, so drop them. */
ret = ftmac100_rxdes_last_segment(rxdes);
- BUG_ON(!ret);
+ if (unlikely(!ret)) {
+ netdev->stats.rx_length_errors++;
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
/* start processing */
skb = netdev_alloc_skb_ip_align(netdev, 128);
@@ -807,8 +845,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1037,6 +1075,37 @@ static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int c
return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
}
+static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+ if (mtu > ETH_DATA_LEN) {
+ /* process long packets in the driver */
+ maccr |= FTMAC100_MACCR_RX_FTL;
+ } else {
+ /* Let the controller drop incoming packets greater
+ * than 1518 (that is 1500 + 14 Ethernet + 4 FCS).
+ */
+ maccr &= ~FTMAC100_MACCR_RX_FTL;
+ }
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+
+ WRITE_ONCE(netdev->mtu, mtu);
+
+ return 0;
+}
+
+static void ftmac100_set_rx_mode(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+
+ ftmac100_set_rx_bits(priv, &maccr);
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_open = ftmac100_open,
.ndo_stop = ftmac100_stop,
@@ -1044,6 +1113,8 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = ftmac100_do_ioctl,
+ .ndo_change_mtu = ftmac100_change_mtu,
+ .ndo_set_rx_mode = ftmac100_set_rx_mode,
};
/******************************************************************************
@@ -1075,6 +1146,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE - VLAN_ETH_HLEN;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1162,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,12 +1213,13 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
}
-static int ftmac100_remove(struct platform_device *pdev)
+static void ftmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftmac100 *priv;
@@ -1157,7 +1234,6 @@ static int ftmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftmac100_of_ids[] = {
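The hash in ftmac100_setup_mc_ht() above is the classic Ethernet multicast filter: the top six bits of the big-endian CRC-32 of the address select one of 64 bits, and the 64-bit table is split across the MAHT0 (low word) and MAHT1 (high word) registers. A self-contained userspace sketch of the same computation (ether_crc_sketch() is a stand-in mirroring the kernel's ether_crc(); 0x04c11db7 is the standard Ethernet polynomial):

#include <stdint.h>
#include <stdio.h>

/* Bitwise equivalent of the kernel's ether_crc(): data bits are fed
 * LSB first into an MSB-first CRC-32 shift register.
 */
static uint32_t ether_crc_sketch(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (length-- > 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int hash = ether_crc_sketch(6, mc) >> 26;	/* 0..63 */
	uint64_t maht = 1ULL << hash;

	/* low word -> MAHT0 register, high word -> MAHT1 register */
	printf("hash bit %u, MAHT0 %08x, MAHT1 %08x\n", hash,
	       (uint32_t)maht, (uint32_t)(maht >> 32));
	return 0;
}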
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
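Retyping the hardware-owned descriptor words as __le32 above changes no generated code; it gives sparse the information to warn when an access bypasses the byte-order helpers. The accessor idiom, sketched with illustrative variable names:

rxdes->rxdes0 = cpu_to_le32(value);	/* CPU byte order -> device (LE) */
value = le32_to_cpu(rxdes->rxdes0);	/* device (LE) -> CPU byte order */
/* rxdes->rxdes0 = value;		   sparse: incorrect type in assignment */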
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..3c9961806f75 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -196,7 +196,7 @@ enum intr_status_bits {
ERI = 0x00000080, /* receive early int */
CNTOVF = 0x00000040, /* counter overflow */
RBU = 0x00000020, /* receive buffer unavailable */
- TBU = 0x00000010, /* transmit buffer unavilable */
+ TBU = 0x00000010, /* transmit buffer unavailable */
TI = 0x00000008, /* transmit interrupt */
RI = 0x00000004, /* receive interrupt */
RxErr = 0x00000002, /* receive error */
@@ -215,7 +215,7 @@ enum rx_mode_bits {
CR_W_RXMODEMASK = 0x000000e0,
CR_W_PROM = 0x00000080, /* promiscuous mode */
CR_W_AB = 0x00000040, /* accept broadcast */
- CR_W_AM = 0x00000020, /* accept mutlicast */
+ CR_W_AM = 0x00000020, /* accept multicast */
CR_W_ARP = 0x00000008, /* receive runt pkt */
CR_W_ALP = 0x00000004, /* receive long pkt */
CR_W_SEP = 0x00000002, /* receive error pkt */
@@ -1074,7 +1074,7 @@ static void allocate_rx_buffers(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
@@ -1163,7 +1163,7 @@ static void enable_rxtx(struct net_device *dev)
static void reset_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct netdev_private *np = timer_container_of(np, t, reset_timer);
struct net_device *dev = np->mii.dev;
unsigned long flags;
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1900,8 +1900,8 @@ static int netdev_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
stop_nic_rxtx(ioaddr, 0);
- del_timer_sync(&np->timer);
- del_timer_sync(&np->reset_timer);
+ timer_delete_sync(&np->timer);
+ timer_delete_sync(&np->reset_timer);
free_irq(np->pci_dev->irq, dev);
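The fealnx conversions above are the tree-wide timer API renames: from_timer() became timer_container_of() and del_timer_sync() became timer_delete_sync(), with identical semantics. The callback idiom, sketched with generic names:

struct example_priv {
	struct timer_list timer;
	/* ... */
};

static void example_timer_fn(struct timer_list *t)
{
	/* resolve the enclosing structure from its timer_list member */
	struct example_priv *priv = timer_container_of(priv, t, timer);

	/* ... do periodic work, possibly re-arm ... */
	mod_timer(&priv->timer, jiffies + HZ);
}

/* teardown: waits for a running callback before the memory goes away */
timer_delete_sync(&priv->timer);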
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..e2a591cf9601 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,18 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select FIXED_PHY if M5272
+ select PAGE_POOL
+ imply PAGE_POOL_STATS
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
@@ -78,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 0e1439fd00bd..2b560661c82a 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -2,8 +2,8 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
+ select PCS_LYNX
help
Data Path Acceleration Architecture Ethernet driver,
supporting the Freescale QorIQ chips.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6b2927d863e2..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,39 +1,14 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
@@ -42,6 +17,7 @@
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/platform_device.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
@@ -52,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -222,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -241,16 +219,16 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
+ net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_LLTX | NETIF_F_RXHASH);
+ NETIF_F_RXHASH);
net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
/* The kernels enables GSO automatically, if we declare NETIF_F_SG.
@@ -260,18 +238,23 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= NETIF_F_RXCSUM;
net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ net_dev->lltx = true;
/* we do not want shared skbs on TX */
net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -286,12 +269,27 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ /* The rest of the config is filled in by the mac device already */
+ mac_dev->phylink_config.dev = &net_dev->dev;
+ mac_dev->phylink_config.type = PHYLINK_NETDEV;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+ mac_dev->phylink = phylink_create(&mac_dev->phylink_config,
+ dev_fwnode(mac_dev->dev),
+ mac_dev->phy_if,
+ mac_dev->phylink_ops);
+ if (IS_ERR(mac_dev->phylink)) {
+ err = PTR_ERR(mac_dev->phylink);
+ dev_err_probe(dev, err, "Could not create phylink\n");
+ return err;
+ }
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() = %d\n", err);
+ phylink_destroy(mac_dev->phylink);
return err;
}
@@ -302,7 +300,8 @@ static int dpaa_stop(struct net_device *net_dev)
{
struct mac_device *mac_dev;
struct dpaa_priv *priv;
- int i, err, error;
+ int i, error;
+ int err = 0;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -313,10 +312,8 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ phylink_stop(mac_dev->phylink);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -324,8 +321,7 @@ static int dpaa_stop(struct net_device *net_dev)
err = error;
}
- if (net_dev->phydev)
- phy_disconnect(net_dev->phydev);
+ phylink_disconnect_phy(mac_dev->phylink);
net_dev->phydev = NULL;
msleep(200);
@@ -375,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ int num_txqs_per_tc = dpaa_num_txqs_per_tc();
struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
@@ -402,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
netdev_set_num_tc(net_dev, num_tc);
for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
- i * DPAA_TC_TXQ_NUM);
+ netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+ i * num_txqs_per_tc);
out:
priv->num_tc = num_tc ? : 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
return 0;
}
@@ -465,6 +462,22 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
+static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
+static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr)
+{
+ const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)addr);
+}
+
static void dpaa_set_rx_mode(struct net_device *net_dev)
{
const struct dpaa_priv *priv;
@@ -492,9 +505,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
err);
}
- err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+ err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync);
if (err < 0)
- netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+ netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n",
err);
}
@@ -653,7 +666,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 6;
break;
case FQ_TYPE_TX:
- switch (idx / DPAA_TC_TXQ_NUM) {
+ switch (idx / dpaa_num_txqs_per_tc()) {
case 0:
/* Low priority (best effort) */
fq->wq = 6;
@@ -671,8 +684,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 0;
break;
default:
- WARN(1, "Too many TX FQs: more than %d!\n",
- DPAA_ETH_TXQ_NUM);
+ WARN(1, "Too many TX FQs: more than %zu!\n",
+ dpaa_max_num_txqs());
}
break;
default:
@@ -744,7 +757,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->rx_pcdq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+ FQ_TYPE_TX_CONF_MQ))
goto fq_alloc_failed;
dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -759,7 +773,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->tx_defq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
goto fq_alloc_failed;
return 0;
@@ -851,12 +865,12 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up
+ * callback.
*/
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD)
cs_th = DPAA_CS_THRESHOLD_10G;
else
cs_th = DPAA_CS_THRESHOLD_1G;
@@ -883,6 +897,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
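
A sketch of how a MAC driver might forward the negotiated rate from its
phylink mac_link_up() callback, assuming mac_device embeds phylink_config as
it does elsewhere in this patch; the hook name and wiring are illustrative:

	static void example_mac_link_up(struct phylink_config *config,
					struct phy_device *phy,
					unsigned int mode,
					phy_interface_t interface, int speed,
					int duplex, bool tx_pause,
					bool rx_pause)
	{
		struct mac_device *mac_dev =
			container_of(config, struct mac_device, phylink_config);

		/* Keep the congestion threshold in step with the live link */
		dpaa_eth_cgr_set_speed(mac_dev, speed);
	}
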
@@ -910,14 +949,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
}
}
-static void dpaa_fq_setup(struct dpaa_priv *priv,
- const struct dpaa_fq_cbs *fq_cbs,
- struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
{
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 channels[NR_CPUS];
struct dpaa_fq *fq;
+ u16 *channels;
+
+ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
@@ -944,11 +987,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
case FQ_TYPE_TX:
dpaa_setup_egress(priv, fq, tx_port,
&fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TXQ_NUM)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -966,16 +1005,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
}
}
- /* Make sure all CPUs receive a corresponding Tx queue. */
- while (egress_cnt < DPAA_ETH_TXQ_NUM) {
- list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TXQ_NUM)
- break;
- }
- }
+ kfree(channels);
+
+ return 0;
}
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -983,7 +1015,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
{
int i;
- for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ for (i = 0; i < dpaa_max_num_txqs(); i++)
if (priv->egress_fqs[i] == tx_fq)
return i;
@@ -1463,13 +1495,8 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
parse_result = (struct fman_prs_result *)parse_results;
/* If we're dealing with VLAN, get the real Ethernet type */
- if (ethertype == ETH_P_8021Q) {
- /* We can't always assume the MAC header is set correctly
- * by the stack, so reset to beginning of skb->data
- */
- skb_reset_mac_header(skb);
- ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- }
+ if (ethertype == ETH_P_8021Q)
+ ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
/* Fill in the relevant L3 parse result fields
* and read the L4 protocol type
@@ -1792,7 +1819,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
struct page *page, *head_page;
struct dpaa_bp *dpaa_bp;
void *vaddr, *sg_vaddr;
- int frag_off, frag_len;
struct sk_buff *skb;
dma_addr_t sg_addr;
int page_offset;
@@ -1835,6 +1861,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
* on Tx, if extra headers are added.
*/
WARN_ON(fd_off != priv->rx_headroom);
+ /* The offset to data start within the buffer holding
+ * the SGT should always be equal to the offset to data
+ * start within the first buffer holding the frame.
+ */
+ WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i]));
skb_reserve(skb, fd_off);
skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
} else {
@@ -1848,21 +1879,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
page = virt_to_page(sg_vaddr);
head_page = virt_to_head_page(sg_vaddr);
- /* Compute offset in (possibly tail) page */
+ /* Compute offset of sg_vaddr in (possibly tail) page */
page_offset = ((unsigned long)sg_vaddr &
(PAGE_SIZE - 1)) +
(page_address(page) - page_address(head_page));
- /* page_offset only refers to the beginning of sgt[i];
- * but the buffer itself may have an internal offset.
+
+ /* Non-initial SGT entries should not have a buffer
+ * offset.
*/
- frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
- frag_len = qm_sg_entry_get_len(&sgt[i]);
+ WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i]));
+
/* skb_add_rx_frag() does no checking on the page; if
* we pass it a tail page, we'll end up with
- * bad page accounting and eventually with segafults.
+ * bad page accounting and eventually with segfaults.
*/
- skb_add_rx_frag(skb, i - 1, head_page, frag_off,
- frag_len, dpaa_bp->size);
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ qm_sg_entry_get_len(&sgt[i]),
+ dpaa_bp->size);
}
/* Update the pool count for the current {cpu x bpool} */
@@ -2247,7 +2280,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
@@ -2261,12 +2294,12 @@ static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
+ bool nonlinear;
int offset = 0;
int err = 0;
@@ -2276,6 +2309,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
qm_fd_clear_fd(&fd);
+ /* Packet data is always read as 32-bit words, so zero out any part of
+ * the skb which might be sent if we have to pad the packet
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
if (!nonlinear) {
/* We're going to store the skb backpointer at the beginning
* of the data buffer, so we need a privately owned skb
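
The padding helper's contract, roughly as defined in include/linux/skbuff.h
(a sketch; see the header for the authoritative version): extend short frames
to @len with zeroed bytes, and with @free_on_error false a failed allocation
leaves the skb for the caller's error path to free.

	static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
					  bool free_on_error)
	{
		unsigned int size = skb->len;

		if (unlikely(size < len)) {
			len -= size;
			if (__skb_pad(skb, len, free_on_error))
				return -ENOMEM;
			__skb_put(skb, len);
		}
		return 0;
	}
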
@@ -2325,7 +2365,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires to do our own update of trans_start */
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
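
txq_trans_cond_update() replaces the bare trans_start store, which raced with
the Tx watchdog's lockless reads. Roughly (per include/linux/netdevice.h):

	static inline void txq_trans_cond_update(struct netdev_queue *txq)
	{
		unsigned long now = jiffies;

		if (READ_ONCE(txq->trans_start) != now)
			WRITE_ONCE(txq->trans_start, now);
	}
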
@@ -2395,6 +2435,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
cleaned = qman_p_poll_dqrr(np->p, budget);
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2402,9 +2445,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
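
Flushing before napi_complete_done() keeps the XDP_REDIRECT flush inside the
NAPI/softirq context that queued the frames, instead of running it after
interrupts were re-armed. A generic poll-loop sketch of the required
ordering (helper names are hypothetical):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work = example_rx_work(napi, budget);	/* hypothetical */

		if (example_saw_redirect(napi))			/* hypothetical */
			xdp_do_flush();				/* flush first ... */

		if (work < budget && napi_complete_done(napi, work))
			example_rearm_irq(napi);		/* ... then re-arm */
		return work;
	}
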
@@ -2531,7 +2571,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
/* Bump the trans_start */
txq = netdev_get_tx_queue(net_dev, smp_processor_id());
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
if (err) {
@@ -2623,7 +2663,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
}
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -2731,7 +2771,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
!fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
&hash_offset)) {
- hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
hash_valid = true;
}
@@ -2899,54 +2939,6 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
}
}
-static void dpaa_adjust_link(struct net_device *net_dev)
-{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
- mac_dev->adjust_link(mac_dev);
-}
-
-/* The Aquantia PHYs are capable of performing rate adaptation */
-#define PHY_VEND_AQUANTIA 0x03a1b400
-
-static int dpaa_phy_init(struct net_device *net_dev)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct mac_device *mac_dev;
- struct phy_device *phy_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
- &dpaa_adjust_link, 0,
- mac_dev->phy_if);
- if (!phy_dev) {
- netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- return -ENODEV;
- }
-
- /* Unless the PHY is capable of rate adaptation */
- if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
- /* remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask,
- mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
- }
-
- phy_support_asym_pause(phy_dev);
-
- mac_dev->phy_dev = phy_dev;
- net_dev->phydev = phy_dev;
-
- return 0;
-}
-
static int dpaa_open(struct net_device *net_dev)
{
struct mac_device *mac_dev;
@@ -2957,7 +2949,8 @@ static int dpaa_open(struct net_device *net_dev)
mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- err = dpaa_phy_init(net_dev);
+ err = phylink_of_phy_connect(mac_dev->phylink,
+ mac_dev->dev->of_node, 0);
if (err)
goto phy_init_failed;
@@ -2967,11 +2960,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phylink_start(mac_dev->phylink);
netif_tx_start_all_queues(net_dev);
@@ -2980,6 +2974,7 @@ static int dpaa_open(struct net_device *net_dev)
mac_start_failed:
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
fman_port_disable(mac_dev->port[i]);
+ phylink_disconnect_phy(mac_dev->phylink);
phy_init_failed:
dpaa_eth_napi_disable(priv);
@@ -3024,7 +3019,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
return -EINVAL;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
return 0;
}
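
dev->mtu is read locklessly on hot paths, so the store is annotated to pair
with those reads. Reader-side counterpart (sketch):

	unsigned int mtu = READ_ONCE(net_dev->mtu);	/* e.g. Rx bound checks */
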
@@ -3093,15 +3088,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
+}
+
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3116,7 +3121,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3125,26 +3130,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
-
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
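
With ndo_hwtstamp_get/set, the ioctl plumbing moves into the core, which is
why the driver's handler above shrinks to the phylink MII forwarding. Rough
core-side sequence for SIOCSHWTSTAMP (see net/core/dev_ioctl.c):

	/* copy_from_user() the legacy struct hwtstamp_config,
	 * convert it to struct kernel_hwtstamp_config,
	 * call ndo_hwtstamp_set(dev, &kernel_cfg, &extack),
	 * convert back and copy_to_user() the (possibly adjusted) config.
	 */
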
static const struct net_device_ops dpaa_ops = {
@@ -3153,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
@@ -3162,6 +3157,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -3173,8 +3170,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
@@ -3189,8 +3185,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_del(&percpu_priv->np.napi);
+ __netif_napi_del(&percpu_priv->np.napi);
}
+ synchronize_net();
}
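
netif_napi_del() performs an RCU grace-period wait per call; using
__netif_napi_del() in the loop and a single synchronize_net() afterwards
batches that wait across all per-CPU NAPI instances. The upstream wrapper
makes the equivalence explicit (include/linux/netdevice.h):

	static inline void netif_napi_del(struct napi_struct *napi)
	{
		__netif_napi_del(napi);
		synchronize_net();
	}
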
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
@@ -3352,7 +3349,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
@@ -3367,6 +3364,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+ priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->egress_fqs),
+ GFP_KERNEL);
+ if (!priv->egress_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->conf_fqs),
+ GFP_KERNEL);
+ if (!priv->conf_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3444,7 +3457,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
*/
dpaa_eth_add_channel(priv->channel, &pdev->dev);
- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ if (err)
+ goto free_dpaa_bps;
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -3490,7 +3505,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
priv->num_tc = 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev,
+ priv->num_tc * dpaa_num_txqs_per_tc());
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
@@ -3526,7 +3542,7 @@ free_netdev:
return err;
}
-static int dpaa_remove(struct platform_device *pdev)
+static void dpaa_remove(struct platform_device *pdev)
{
struct net_device *net_dev;
struct dpaa_priv *priv;
@@ -3542,8 +3558,12 @@ static int dpaa_remove(struct platform_device *pdev)
dev_set_drvdata(dev, NULL);
unregister_netdev(net_dev);
+ phylink_destroy(priv->mac_dev->phylink);
err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+ if (err)
+ dev_err(dev, "Failed to free FQs on remove (%pe)\n",
+ ERR_PTR(err));
qman_delete_cgr_safe(&priv->ingress_cgr);
qman_release_cgrid(priv->ingress_cgr.cgrid);
@@ -3555,8 +3575,6 @@ static int dpaa_remove(struct platform_device *pdev)
dpaa_bps_free(priv);
free_netdev(net_dev);
-
- return err;
}
static const struct platform_device_id dpaa_devtype[] = {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index daf894a97050..7ed659eb08de 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,31 +1,6 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#ifndef __DPAA_H
@@ -33,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/refcount.h>
+#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
@@ -42,10 +18,6 @@
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
@@ -166,8 +138,8 @@ struct dpaa_priv {
struct mac_device *mac_dev;
struct device *rx_dma_dev;
struct device *tx_dma_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
- struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq **egress_fqs;
+ struct qman_fq **conf_fqs;
u16 channel;
struct list_head dpaa_fq_list;
@@ -209,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+ return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+ return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
#endif /* __DPAA_H */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index ee62d25cac81..aad470e9caea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#include <linux/init.h>
@@ -61,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
u32 last_fqid = 0;
ssize_t bytes = 0;
char *str;
- int i = 0;
list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
switch (fq->fq_type) {
@@ -111,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
prev = fq;
prevstr = str;
- i++;
}
if (prev) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 409c1dc39430..9e1d44ae92cc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -1,32 +1,6 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
*/
#undef TRACE_SYSTEM
@@ -82,8 +56,8 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
__entry->fd_format = qm_fd_get_format(fd);
__entry->fd_offset = qm_fd_get_offset(fd);
__entry->fd_length = qm_fd_get_length(fd);
- __entry->fd_status = fd->status;
- __assign_str(name, netdev->name);
+ __entry->fd_status = __be32_to_cpu(fd->status);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is triggered */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..ed3fa80af8c3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -1,38 +1,14 @@
-/* Copyright 2008-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>
@@ -80,35 +56,27 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev)
- return 0;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- phy_ethtool_ksettings_get(net_dev->phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
}
static int dpaa_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
- if (err < 0)
- netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
-
- return err;
+ return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
}
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -125,80 +93,28 @@ static void dpaa_set_msglevel(struct net_device *net_dev,
static int dpaa_nway_reset(struct net_device *net_dev)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- err = 0;
- if (net_dev->phydev->autoneg) {
- err = phy_start_aneg(net_dev->phydev);
- if (err < 0)
- netdev_err(net_dev, "phy_start_aneg() = %d\n",
- err);
- }
-
- return err;
+ return phylink_ethtool_nway_reset(mac_dev->phylink);
}
static void dpaa_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- if (!net_dev->phydev)
- return;
-
- epause->autoneg = mac_dev->autoneg_pause;
- epause->rx_pause = mac_dev->rx_pause_active;
- epause->tx_pause = mac_dev->tx_pause_active;
+ phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct phy_device *phydev;
- bool rx_pause, tx_pause;
- struct dpaa_priv *priv;
- int err;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phydev = net_dev->phydev;
- if (!phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
- return -ENODEV;
- }
-
- if (!phy_validate_pause(phydev, epause))
- return -EINVAL;
-
- /* The MAC should know how to handle PAUSE frame autonegotiation before
- * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
- * settings.
- */
- mac_dev->autoneg_pause = !!epause->autoneg;
- mac_dev->rx_pause_req = !!epause->rx_pause;
- mac_dev->tx_pause_req = !!epause->tx_pause;
-
- /* Determine the sym/asym advertised PAUSE capabilities from the desired
- * rx/tx pause settings.
- */
-
- phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
-
- return err;
+ return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
@@ -327,42 +243,28 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
u8 *data)
{
- unsigned int i, j, num_cpus, size;
- char string_cpu[ETH_GSTRING_LEN];
- u8 *strings;
+ unsigned int i, j, num_cpus;
- memset(string_cpu, 0, sizeof(string_cpu));
- strings = data;
- num_cpus = num_online_cpus();
- size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+ num_cpus = num_online_cpus();
for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
- dpaa_stats_percpu[i], j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
- dpaa_stats_percpu[i]);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
- }
- for (j = 0; j < num_cpus; j++) {
- snprintf(string_cpu, ETH_GSTRING_LEN,
- "bpool [CPU %d]", j);
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (j = 0; j < num_cpus; j++)
+ ethtool_sprintf(&data, "%s [CPU %d]",
+ dpaa_stats_percpu[i], j);
+
+ ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]);
}
- snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
- memcpy(strings, string_cpu, ETH_GSTRING_LEN);
- strings += ETH_GSTRING_LEN;
+ for (i = 0; i < num_cpus; i++)
+ ethtool_sprintf(&data, "bpool [CPU %d]", i);
+
+ ethtool_puts(&data, "bpool [TOTAL]");
- memcpy(strings, dpaa_stats_global, size);
+ for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++)
+ ethtool_puts(&data, dpaa_stats_global[i]);
}
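
ethtool_sprintf()/ethtool_puts() format into the current ETH_GSTRING_LEN slot
and advance the cursor, replacing the per-driver snprintf/memcpy/pointer
arithmetic. Roughly (per the upstream helper):

	void ethtool_sprintf(u8 **data, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
		va_end(args);

		*data += ETH_GSTRING_LEN;
	}
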
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -397,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -427,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -462,23 +349,8 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct device *dev = net_dev->dev.parent;
struct device_node *mac_node = dev->of_node;
@@ -489,14 +361,20 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
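
of_get_parent(), of_parse_phandle() and of_find_device_by_node() each return
a counted reference that the caller must drop, which is what the added
of_node_put()/put_device() calls do. The general shape (sketch):

	parent = of_get_parent(np);			/* +1 on parent */
	if (parent) {
		child = of_parse_phandle(parent, "prop", 0);	/* +1 on child */
		of_node_put(parent);			/* done with parent */
	}
	if (child) {
		pdev = of_find_device_by_node(child);	/* +1 on pdev->dev */
		of_node_put(child);
	}
	if (pdev)
		put_device(&pdev->dev);			/* after drvdata is read */
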
@@ -537,12 +415,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const cpumask_t *cpus = qman_affine_cpus();
- bool needs_revert[NR_CPUS] = {false};
struct qman_portal *portal;
u32 period, prev_period;
u8 thresh, prev_thresh;
+ bool *needs_revert;
int cpu, res;
+ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+ if (!needs_revert)
+ return -ENOMEM;
+
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -565,6 +447,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
needs_revert[cpu] = true;
}
+ kfree(needs_revert);
+
return 0;
revert_values:
@@ -578,9 +462,52 @@ revert_values:
qman_dqrr_set_ithresh(portal, prev_thresh);
}
+ kfree(needs_revert);
+
return res;
}
+static void dpaa_get_pause_stats(struct net_device *net_dev,
+ struct ethtool_pause_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_pause_stats)
+ mac_dev->get_pause_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_rmon_stats(struct net_device *net_dev,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_rmon_stats)
+ mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges);
+}
+
+static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_ctrl_stats)
+ mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_eth_mac_stats(struct net_device *net_dev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_mac_stats)
+ mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s);
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
@@ -596,9 +523,13 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
+ .get_pause_stats = dpaa_get_pause_stats,
+ .get_rmon_stats = dpaa_get_rmon_stats,
+ .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats,
+ .get_eth_mac_stats = dpaa_get_eth_mac_stats,
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 3d9842af7f10..1b05ba8d1cbf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 8356af4631fd..1af254caeb0d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n",
+ "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs",
"Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
- ch->ch_id,
+ seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n",
+ "CH#", i, ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
ch->stats.frames,
@@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
+static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ int i, j, num_queues, buf_cnt;
+ struct dpaa2_eth_bp *bp;
+ char ch_name[10];
+ int err;
+
+ /* Print out the header */
+ seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count");
+ num_queues = dpaa2_eth_queue_count(priv);
+ for (i = 0; i < num_queues; i++) {
+ snprintf(ch_name, sizeof(ch_name), "CH#%d", i);
+ seq_printf(file, "%10s", ch_name);
+ }
+ seq_printf(file, "\n");
+
+ /* For each buffer pool, print out its BPID, the number of buffers in
+ * that buffer pool and the channels which are using it.
+ */
+ for (i = 0; i < priv->num_bps; i++) {
+ bp = priv->bp[i];
+
+ err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(priv->net_dev, "Buffer count query error %d\n", err);
+ return err;
+ }
+
+ seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt);
+ for (j = 0; j < num_queues; j++) {
+ if (priv->channel[j]->bp == bp)
+ seq_printf(file, "%10s", "x");
+ else
+ seq_printf(file, "%10s", "");
+ }
+ seq_printf(file, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp);
+
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpni_dev;
@@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
/* per-fq stats file */
debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+
+ /* per buffer pool stats file */
+ debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops);
+
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 7fefe1574b6a..76f808d38066 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -37,18 +37,9 @@ static int dpaa2_eth_dl_info_get(struct devlink *devlink,
struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
char buf[10];
- int err;
-
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
- err = devlink_info_version_running_put(req, "dpni", buf);
- if (err)
- return err;
-
- return 0;
+ return devlink_info_version_running_put(req, "dpni", buf);
}
static struct dpaa2_eth_trap_item *
@@ -226,25 +217,16 @@ int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
struct devlink_port_attrs attrs = {};
- int err;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(devlink_port, &attrs);
-
- err = devlink_port_register(priv->devlink, devlink_port, 0);
- if (err)
- return err;
-
- devlink_port_type_eth_set(devlink_port, priv->net_dev);
-
- return 0;
+ return devlink_port_register(priv->devlink, devlink_port, 0);
}
void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
- devlink_port_type_clear(devlink_port);
devlink_port_unregister(devlink_port);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 5fb5f14e01ec..956767e0869c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -48,7 +48,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_fd,
__entry->fd_addr = dpaa2_fd_get_addr(fd);
__entry->fd_len = dpaa2_fd_get_len(fd);
__entry->fd_offset = dpaa2_fd_get_offset(fd);
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is
@@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
TP_ARGS(netdev, fd)
);
+/* Tx (egress) XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Rx fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_PROTO(struct net_device *netdev,
@@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_ARGS(netdev, fd)
);
+/* Rx XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Tx confirmation fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
TP_PROTO(struct net_device *netdev,
@@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
);
/* Log data about raw buffers. Useful for tracing DPBP content. */
-TRACE_EVENT(dpaa2_eth_buf_seed,
- /* Trace function prototype */
- TP_PROTO(struct net_device *netdev,
- /* virtual address and size */
- void *vaddr,
- size_t size,
- /* dma map address and size */
- dma_addr_t dma_addr,
- size_t map_size,
- /* buffer pool id, if relevant */
- u16 bpid),
-
- /* Repeat argument list here */
- TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
-
- /* A structure containing the relevant information we want
- * to record. Declare name and type for each normal element,
- * name, type and size for arrays. Use __string for variable
- * length strings.
- */
- TP_STRUCT__entry(
- __field(void *, vaddr)
- __field(size_t, size)
- __field(dma_addr_t, dma_addr)
- __field(size_t, map_size)
- __field(u16, bpid)
- __string(name, netdev->name)
- ),
-
- /* The function that assigns values to the above declared
- * fields
- */
- TP_fast_assign(
- __entry->vaddr = vaddr;
- __entry->size = size;
- __entry->dma_addr = dma_addr;
- __entry->map_size = map_size;
- __entry->bpid = bpid;
- __assign_str(name, netdev->name);
- ),
-
- /* This is what gets printed when the trace event is
- * triggered.
- */
- TP_printk(TR_BUF_FMT,
- __get_str(name),
- __entry->vaddr,
- __entry->size,
- &__entry->dma_addr,
- __entry->map_size,
- __entry->bpid)
+DECLARE_EVENT_CLASS(dpaa2_eth_buf,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* Main memory buff seeding */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
+);
+
+/* UMEM buff seeding on AF_XDP fast path */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
);
/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
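
Converting the TRACE_EVENT() to a DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair
means the record/format code is emitted once and each new event (here the XSK
seeding one) is a thin alias. Note also the one-argument __assign_str(): the
source string now comes from the matching __string() declaration. Minimal
pattern sketch:

	DECLARE_EVENT_CLASS(example_class,
		TP_PROTO(struct net_device *netdev),
		TP_ARGS(netdev),
		TP_STRUCT__entry(__string(name, netdev->name)),
		TP_fast_assign(__assign_str(name);),
		TP_printk("%s", __get_str(name))
	);

	DEFINE_EVENT(example_class, example_event,
		TP_PROTO(struct net_device *netdev),
		TP_ARGS(netdev)
	);
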
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 714e961e7a77..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -8,7 +8,6 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
@@ -18,6 +17,8 @@
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
+#include <net/tso.h>
+#include <net/xdp_sock_drv.h>
#include "dpaa2-eth.h"
@@ -34,8 +35,77 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
-static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
- dma_addr_t iova_addr)
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
{
phys_addr_t phys_addr;
@@ -209,23 +279,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
* be released in the pool
*/
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
- int count)
+ int count, bool xsk_zc)
{
struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_swa *swa;
+ struct xdp_buff *xdp_buff;
void *vaddr;
int i;
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vaddr, 0);
+
+ if (!xsk_zc) {
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ swa = (struct dpaa2_eth_swa *)
+ (vaddr + DPAA2_ETH_RX_HWA_SIZE);
+ xdp_buff = swa->xsk.xdp_buff;
+ xsk_buff_free(xdp_buff);
+ }
}
}
-static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -234,7 +314,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
- while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
ch->recycled_bufs,
ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -243,7 +323,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
+ ch->recycled_bufs_cnt, ch->xsk_zc);
ch->buf_count -= ch->recycled_bufs_cnt;
}
@@ -307,10 +388,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
fq->xdp_tx_fds.num = 0;
}
-static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -374,7 +455,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -415,19 +496,15 @@ out:
return xdp_act;
}
-static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr)
{
u16 fd_offset = dpaa2_fd_get_offset(fd);
- struct dpaa2_eth_priv *priv = ch->priv;
- u32 fd_length = dpaa2_fd_get_len(fd);
struct sk_buff *skb = NULL;
unsigned int skb_len;
- if (fd_length > priv->rx_copybreak)
- return NULL;
-
skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
skb = napi_alloc_skb(&ch->napi, skb_len);
@@ -439,16 +516,69 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
-
return skb;
}
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
+}
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb)
+{
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, fq->flowid);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+ list_add_tail(&skb->list, ch->rx_list);
+}
+
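
Factoring the timestamp/csum/stats tail into dpaa2_eth_receive_skb() lets the
AF_XDP zero-copy Rx path added in this series reuse it. A hypothetical caller
sketch from such a path:

	skb = dpaa2_eth_alloc_skb(priv, ch, fd, dpaa2_fd_get_len(fd), vaddr);
	if (skb)
		dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq,
				      percpu_stats, skb);
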
/* Main Rx frame processing routine */
-static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq)
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -457,9 +587,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_fas *fas;
+ bool recycle_rx_buf = false;
void *buf_data;
- u32 status = 0;
u32 xdp_act;
/* Tracing point */
@@ -469,8 +598,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- fas = dpaa2_get_fas(vaddr, false);
- prefetch(fas);
buf_data = vaddr + dpaa2_fd_get_offset(fd);
prefetch(buf_data);
@@ -490,6 +617,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else {
+ recycle_rx_buf = true;
}
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@@ -508,35 +637,10 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
if (unlikely(!skb))
goto err_build_skb;
- prefetch(skb->data);
-
- /* Get the timestamp value */
- if (priv->rx_tstamp) {
- struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- __le64 *ts = dpaa2_get_ts(vaddr, false);
- u64 ns;
-
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- }
-
- /* Check if we need to validate the L4 csum */
- if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
- status = le32_to_cpu(fas->status);
- dpaa2_eth_validate_rx_csum(priv, status, skb);
- }
-
- skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, fq->flowid);
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
-
- list_add_tail(&skb->list, ch->rx_list);
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+ if (recycle_rx_buf)
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return;
err_build_skb:
@@ -695,7 +799,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
struct ptp_tstamp origin_timestamp;
- struct dpni_single_step_cfg cfg;
u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
struct dpaa2_fas *fas;
@@ -749,17 +852,48 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
htonl(origin_timestamp.sec_lsb);
*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
- cfg.en = 1;
- cfg.ch_update = udp;
- cfg.offset = offset1;
- cfg.peer_delay = 0;
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
- if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
- &cfg))
- WARN_ONCE(1, "Failed to set single step register");
}
}
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
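
dpaa2_eth_sgt_get()/dpaa2_eth_sgt_recycle() put a small per-CPU free list in front of the page-frag allocator, so the hot Tx path skips the allocator whenever a previously used SGT buffer is available. A hedged generic version of the same cache; CACHE_DEPTH and struct buf_cache are assumed names mirroring DPAA2_ETH_SGT_CACHE_SIZE and the driver's sgt_cache:

#include <linux/percpu.h>
#include <linux/skbuff.h>

#define CACHE_DEPTH 16	/* assumed depth */

struct buf_cache {
	int count;
	void *buf[CACHE_DEPTH];
};

/* Pop from the per-CPU cache, falling back to the frag allocator.
 * Callers are assumed to run in softirq context, as in the driver.
 */
static void *cache_get(struct buf_cache __percpu *cache, unsigned int size)
{
	struct buf_cache *c = this_cpu_ptr(cache);

	if (c->count)
		return c->buf[--c->count];
	return napi_alloc_frag(size);
}

/* Push back, or free the frag if the cache is already full. */
static void cache_put(struct buf_cache __percpu *cache, void *buf)
{
	struct buf_cache *c = this_cpu_ptr(cache);

	if (c->count < CACHE_DEPTH)
		c->buf[c->count++] = buf;
	else
		skb_free_frag(buf);
}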
+
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
@@ -805,12 +939,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -846,6 +979,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
err = -ENOMEM;
goto dma_map_single_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
@@ -855,7 +989,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
return 0;
dma_map_single_failed:
- skb_free_frag(sgt_buf);
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
@@ -875,7 +1009,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr;
@@ -884,18 +1017,10 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
int err;
/* Prepare the HW SGT structure */
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
-
- if (sgt_cache->count == 0)
- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
- GFP_ATOMIC);
- else
- sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf))
return -ENOMEM;
-
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
@@ -923,6 +1048,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
goto sgt_map_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, sgt_addr);
@@ -934,10 +1060,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
sgt_map_failed:
dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(sgt_buf);
- else
- sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
return err;
}
@@ -954,14 +1077,11 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-
- /* If there's enough room to align the FD address, do it.
- * It will help hardware optimize accesses.
- */
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
+ else
+ return -ENOMEM;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@@ -978,6 +1098,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
@@ -994,9 +1115,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -1005,9 +1127,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd);
-
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ void *tso_hdr;
+ int i;
fd_addr = dpaa2_fd_get_addr(fd);
buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
@@ -1039,6 +1162,33 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap and free the header */
+ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(tso_hdr);
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
+ } else if (swa->type == DPAA2_ETH_SWA_XSK) {
+		/* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
+ DMA_BIDIRECTIONAL);
} else {
skb = swa->single.skb;
@@ -1056,6 +1206,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
return;
}
+ if (swa->type == DPAA2_ETH_SWA_XSK) {
+ ch->xsk_tx_pkts_sent++;
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+ return;
+ }
+
if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
fq->dq_frames++;
fq->dq_bytes += fd_len;
@@ -1067,55 +1223,195 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (skb->cb[0] == TX_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(buffer_start, true);
- u64 ns;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
- mutex_unlock(&priv->onestep_tstamp_lock);
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
}
/* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single) {
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
- if (swa->type == DPAA2_ETH_SWA_SG) {
- skb_free_frag(buffer_start);
- } else {
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(buffer_start);
- else
- sgt_cache->buf[sgt_cache->count++] = buffer_start;
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
}
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
+ }
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
}
- /* Move on with skb release */
- napi_consume_skb(skb, in_napi);
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
+
+ return err;
}
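
dpaa2_eth_build_gso_fd() above is an instance of the kernel's generic software-TSO helper flow from <net/tso.h>: tso_start() parses the headers once, each segment gets a rebuilt header via tso_build_hdr(), and the payload is walked in chunks with tso_build_data(). A hedged skeleton of just that control flow, leaving out the driver's DMA mapping and frame-descriptor details; emit() is a hypothetical callback that would turn one buffer into an SG entry:

#include <net/tso.h>
#include <linux/skbuff.h>

static void sw_tso_walk(struct sk_buff *skb,
			void (*emit)(void *buf, int len))
{
	int hdr_len, total_len, data_left, size;
	char hdr[TSO_HEADER_SIZE];
	struct tso_t tso;

	/* Parse MAC/IP/TCP headers once, get the payload length */
	hdr_len = tso_start(skb, &tso);
	total_len = skb->len - hdr_len;

	while (total_len > 0) {
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Rebuild the headers for this segment */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		emit(hdr, hdr_len);		/* one entry for the headers */

		while (data_left > 0) {
			size = min_t(int, tso.size, data_left);
			emit(tso.data, size);	/* one entry per data chunk */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}
}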
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_fd fd;
- struct rtnl_link_stats64 *percpu_stats;
+ int total_enqueued = 0, retries = 0, enqueued;
struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
struct dpaa2_eth_fq *fq;
struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
u16 queue_mapping;
- unsigned int needed_headroom;
- u32 fd_len;
+ void *swa = NULL;
u8 prio = 0;
int err, i;
- void *swa;
+ u32 fd_len;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
needed_headroom = dpaa2_eth_needed_headroom(skb);
@@ -1130,20 +1426,28 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
}
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
- if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
}
if (unlikely(err)) {
@@ -1151,11 +1455,12 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
goto err_build_fd;
}
- if (skb->cb[0])
- dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack.
* In case of a forwarded frame from another DPNI interface, we choose
@@ -1175,27 +1480,32 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
-
- fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with
* the Tx confirmation callback for this frame
*/
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
- if (err != -EBUSY)
- break;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
}
- percpu_extras->tx_portal_busy += i;
+ percpu_extras->tx_portal_busy += retries;
+
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- percpu_stats->tx_packets++;
+ percpu_stats->tx_packets += total_enqueued;
percpu_stats->tx_bytes += fd_len;
}
@@ -1285,7 +1595,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1363,44 +1673,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
* to the specified buffer pool
*/
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
+ struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ struct dpaa2_eth_swa *swa;
struct page *page;
dma_addr_t addr;
int retries = 0;
- int i, err;
-
- for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
- /* Allocate buffer visible to WRIOP + skb shared info +
- * alignment padding
- */
- /* allocate one page for each Rx buffer. WRIOP sees
- * the entire page except for a tailroom reserved for
- * skb shared info
+ int i = 0, err;
+ u32 batch;
+
+ /* Allocate buffers visible to WRIOP */
+ if (!ch->xsk_zc) {
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Also allocate skb shared info and alignment padding.
+ * There is one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page)
+ goto err_alloc;
+
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
+ } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
+ /* Allocate XSK buffers for AF_XDP fast path in batches
+ * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
+ * provide enough buffers at the moment
*/
- page = dev_alloc_pages(0);
- if (!page)
+ batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
+ DPAA2_ETH_BUFS_PER_CMD);
+ if (!batch)
goto err_alloc;
- addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto err_map;
+ for (i = 0; i < batch; i++) {
+ swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
+ DPAA2_ETH_RX_HWA_SIZE);
+ swa->xsk.xdp_buff = xdp_buffs[i];
- buf_array[i] = addr;
+ addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
- /* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, priv->rx_buf_size,
- bpid);
+ buf_array[i] = addr;
+
+ trace_dpaa2_xsk_buf_seed(priv->net_dev,
+ xdp_buffs[i]->data_hard_start,
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
}
release_bufs:
/* In case the portal is busy, retry until successful */
- while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
buf_array, i)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
@@ -1411,14 +1753,19 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- dpaa2_eth_free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
return 0;
}
return i;
err_map:
- __free_pages(page, 0);
+ if (!ch->xsk_zc) {
+ __free_pages(page, 0);
+ } else {
+ for (; i < batch; i++)
+ xsk_buff_free(xdp_buffs[i]);
+ }
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
@@ -1429,39 +1776,64 @@ err_alloc:
return 0;
}
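
For the zero-copy branch, the seeding above leans on three AF_XDP pool calls: xsk_buff_can_alloc() checks that the fill ring can satisfy the batch, xsk_buff_alloc_batch() pulls up to N xdp_buffs in one go, and xsk_buff_xdp_get_frame_dma() yields the device address the hardware will use. A hedged fragment showing just that trio:

#include <net/xdp_sock_drv.h>

/* Seed up to 'want' (<= 64 here) zero-copy buffers from an XSK pool;
 * returns how many DMA addresses were produced. API-usage sketch only.
 */
static int xsk_seed(struct xsk_buff_pool *pool, dma_addr_t *addrs, int want)
{
	struct xdp_buff *bufs[64];
	u32 i, n;

	if (!xsk_buff_can_alloc(pool, want))
		return 0;	/* UMEM fill ring cannot satisfy the batch */

	n = xsk_buff_alloc_batch(pool, bufs, want);
	for (i = 0; i < n; i++)
		addrs[i] = xsk_buff_xdp_get_frame_dma(bufs[i]);

	return n;
}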
-static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
{
- int i, j;
+ int i;
int new_count;
- for (j = 0; j < priv->num_channels; j++) {
- for (i = 0; i < DPAA2_ETH_NUM_BUFS;
- i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
- priv->channel[j]->buf_count += new_count;
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = dpaa2_eth_add_bufs(priv, ch);
+ ch->buf_count += new_count;
- if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
- return -ENOMEM;
- }
- }
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+ return -ENOMEM;
}
return 0;
}
+static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct dpaa2_eth_channel *channel;
+ int i, err = 0;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+
+ err = dpaa2_eth_seed_pool(priv, channel);
+
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ if (err)
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ channel->bp->dev->obj_desc.id,
+ channel->bp->bpid);
+ }
+}
+
/*
- * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * Drain the specified number of buffers from one of the DPNI's private buffer
+ * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
+ int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ bool xsk_zc = false;
int retries = 0;
- int ret;
+ int i, ret;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->bp->bpid == bpid)
+ xsk_zc = priv->channel[i]->xsk_zc;
do {
- ret = dpaa2_io_service_acquire(NULL, priv->bpid,
- buf_array, count);
+ ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
if (ret < 0) {
if (ret == -EBUSY &&
retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1469,28 +1841,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- dpaa2_eth_free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
retries = 0;
} while (ret);
}
-static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
int i;
- dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- dpaa2_eth_drain_bufs(priv, 1);
+ /* Drain the buffer pool */
+ dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, bpid, 1);
+	/* Zero the buffer count of all channels which were using
+	 * this buffer pool.
+	 */
for (i = 0; i < priv->num_channels; i++)
- priv->channel[i]->buf_count = 0;
+ if (priv->channel[i]->bp->bpid == bpid)
+ priv->channel[i]->buf_count = 0;
+}
+
+static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
int new_count;
@@ -1498,7 +1882,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1523,7 +1907,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
count = sgt_cache->count;
for (i = 0; i < count; i++)
- kfree(sgt_cache->buf[i]);
+ skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
@@ -1562,6 +1946,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ bool work_done_zc = false;
struct list_head rx_list;
int retries = 0;
u16 flowid;
@@ -1574,13 +1959,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
INIT_LIST_HEAD(&rx_list);
ch->rx_list = &rx_list;
+ if (ch->xsk_zc) {
+ work_done_zc = dpaa2_xsk_tx(priv, ch);
+ /* If we reached the XSK Tx per NAPI threshold, we're done */
+ if (work_done_zc) {
+ work_done = budget;
+ goto out;
+ }
+ }
+
do {
err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch);
store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
@@ -1600,10 +1994,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
@@ -1626,6 +2025,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
out:
netif_receive_skb_list(ch->rx_list);
+ if (ch->xsk_tx_pkts_sent) {
+ xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
+ ch->xsk_tx_pkts_sent = 0;
+ }
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -1634,9 +2038,7 @@ out:
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
@@ -1749,8 +2151,11 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to priv->mac), so we cannot race with it.
*/
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_mac_is_type_phy(priv->mac))
goto out;
 	/* Check link state; speed / duplex changes are not treated yet */
@@ -1779,15 +2184,9 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = dpaa2_eth_seed_pool(priv, priv->bpid);
- if (err) {
- /* Not much to do; the buffer pool, though not filled up,
- * may still contain some buffers which would enable us
- * to limp on.
- */
- netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
- priv->dpbp_dev->obj_desc.id, priv->bpid);
- }
+ dpaa2_eth_seed_pools(priv);
+
+ mutex_lock(&priv->mac_lock);
if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
@@ -1807,18 +2206,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
+ mutex_unlock(&priv->mac_lock);
netdev_err(net_dev, "dpni_enable() failed\n");
goto enable_err;
}
if (dpaa2_eth_is_type_phy(priv))
- phylink_start(priv->mac->phylink);
+ dpaa2_mac_start(priv->mac);
+
+ mutex_unlock(&priv->mac_lock);
return 0;
enable_err:
dpaa2_eth_disable_ch_napi(priv);
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
return err;
}
@@ -1885,13 +2287,17 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
- phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
}
+ mutex_unlock(&priv->mac_lock);
+
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
* - cut off WRIOP dequeues from egress FQs and wait until transmission
@@ -1922,7 +2328,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -2178,48 +2584,68 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
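
The two callbacks above replace the driver's old SIOCSHWTSTAMP branch of the ioctl handler; the core now unpacks the userspace struct hwtstamp_config into a kernel_hwtstamp_config before calling into the driver. From userspace the request is unchanged. A hedged example enabling timestamping on an assumed interface name ("eth0"):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else	/* the driver may widen rx_filter, e.g. to HWTSTAMP_FILTER_ALL */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}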
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
+ mutex_lock(&priv->mac_lock);
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
return -EOPNOTSUPP;
}
@@ -2280,7 +2706,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
return err;
out:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -2328,7 +2754,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
need_update = (!!priv->xdp_prog != !!prog);
if (up)
- dpaa2_eth_stop(dev);
+ dev_close(dev);
/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
* Also, when switching between xdp/non-xdp modes we need to reconfigure
@@ -2356,7 +2782,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
}
if (up) {
- err = dpaa2_eth_open(dev);
+ err = dev_open(dev, NULL);
if (err)
return err;
}
@@ -2367,7 +2793,7 @@ out_err:
if (prog)
bpf_prog_sub(prog, priv->num_channels);
if (up)
- dpaa2_eth_open(dev);
+ dev_open(dev, NULL);
return err;
}
@@ -2377,6 +2803,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return dpaa2_eth_setup_xdp(dev, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2476,11 +2904,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
static int update_xps(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
- struct cpumask xps_mask;
- struct dpaa2_eth_fq *fq;
int i, num_queues, netdev_queues;
+ struct dpaa2_eth_fq *fq;
+ cpumask_var_t xps_mask;
int err = 0;
+ if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
+ return -ENOMEM;
+
num_queues = dpaa2_eth_queue_count(priv);
netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
@@ -2490,16 +2921,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
for (i = 0; i < netdev_queues; i++) {
fq = &priv->fq[i % num_queues];
- cpumask_clear(&xps_mask);
- cpumask_set_cpu(fq->target_cpu, &xps_mask);
+ cpumask_clear(xps_mask);
+ cpumask_set_cpu(fq->target_cpu, xps_mask);
- err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ err = netif_set_xps_queue(net_dev, xps_mask, i);
if (err) {
netdev_warn_once(net_dev, "Error setting XPS queue\n");
break;
}
}
+ free_cpumask_var(xps_mask);
return err;
}
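
The update_xps() change swaps an on-stack struct cpumask for cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK and a large NR_CPUS, a cpumask can run to kilobytes, so it is heap-allocated via alloc_cpumask_var() instead of living in the stack frame. The usage pattern, as a hedged standalone fragment:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* cpumask_var_t is a plain array on small configs and a pointer on
 * CPUMASK_OFFSTACK kernels; the alloc/free pair works for both.
 */
static int set_one_cpu_mask(int cpu)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	/* ... pass 'mask' to an API such as netif_set_xps_queue() ... */
	free_cpumask_var(mask);
	return 0;
}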
@@ -2607,9 +3039,12 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -2621,7 +3056,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
/* Update NAPI statistics */
ch->stats.cdan++;
- napi_schedule(&ch->napi);
+	/* NAPI can also be scheduled from the AF_XDP Tx path. Mark it as
+	 * missed so that it can be rescheduled again.
+	 */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
@@ -2634,10 +3073,12 @@ static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "Waiting for DPCON\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ }
return ERR_PTR(err);
}
@@ -2747,7 +3188,9 @@ static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
- if (err != -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER)
+ dev_dbg(dev, "waiting for affine channel\n");
+ else
dev_info(dev,
"No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
@@ -2930,13 +3373,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
dpaa2_eth_set_fq_affinity(priv);
}
-/* Allocate and configure one buffer pool for each interface */
-static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+/* Allocate and configure a buffer pool */
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
{
- int err;
- struct fsl_mc_device *dpbp_dev;
struct device *dev = priv->net_dev->dev.parent;
+ struct fsl_mc_device *dpbp_dev;
struct dpbp_attr dpbp_attrs;
+ struct dpaa2_eth_bp *bp;
+ int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
@@ -2945,12 +3389,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
err = -EPROBE_DEFER;
else
dev_err(dev, "DPBP device allocation failed\n");
- return err;
+ return ERR_PTR(err);
}
- priv->dpbp_dev = dpbp_dev;
+ bp = kzalloc(sizeof(*bp), GFP_KERNEL);
+ if (!bp) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
- err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
&dpbp_dev->mc_handle);
if (err) {
dev_err(dev, "dpbp_open() failed\n");
@@ -2975,9 +3423,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
dev_err(dev, "dpbp_get_attributes() failed\n");
goto err_get_attr;
}
- priv->bpid = dpbp_attrs.bpid;
- return 0;
+ bp->dev = dpbp_dev;
+ bp->bpid = dpbp_attrs.bpid;
+
+ return bp;
err_get_attr:
dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
@@ -2985,17 +3435,58 @@ err_enable:
err_reset:
dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
+ kfree(bp);
+err_alloc:
fsl_mc_object_free(dpbp_dev);
- return err;
+ return ERR_PTR(err);
+}
+
+static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_bp *bp;
+ int i;
+
+ bp = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
+ priv->num_bps++;
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->bp = bp;
+
+ return 0;
+}
+
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
+{
+ int idx_bp;
+
+ /* Find the index at which this BP is stored */
+ for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
+ if (priv->bp[idx_bp] == bp)
+ break;
+
+ /* Drain the pool and disable the associated MC object */
+ dpaa2_eth_drain_pool(priv, bp->bpid);
+ dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
+ fsl_mc_object_free(bp->dev);
+ kfree(bp);
+
+	/* Move the last in-use DPBP into this position */
+ priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
+ priv->num_bps--;
}
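
Freeing a buffer pool uses the classic unordered-array removal seen above: the freed slot is overwritten by the last live element and the count is decremented, which is O(1) and valid because the driver never relies on DPBP ordering. The generic form, as a hedged sketch:

/* Remove arr[idx] from an unordered array of n elements in O(1);
 * returns the new element count.
 */
static inline int swap_remove(void **arr, int n, int idx)
{
	arr[idx] = arr[n - 1];	/* last element takes the freed slot */
	return n - 1;
}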
-static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
{
- dpaa2_eth_drain_pool(priv);
- dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- fsl_mc_object_free(priv->dpbp_dev);
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_free_dpbp(priv, priv->bp[i]);
}
static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
@@ -3336,7 +3827,7 @@ static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
priv->dpni_ver_major, priv->dpni_ver_minor,
DPNI_VER_MAJOR, DPNI_VER_MINOR);
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto close;
}
@@ -3447,6 +3938,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
MEM_TYPE_PAGE_ORDER0, NULL);
if (err) {
dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
+ xdp_rxq_info_unreg(&fq->channel->xdp_rxq);
return err;
}
@@ -3880,15 +4372,16 @@ out:
*/
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
+ struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
struct net_device *net_dev = priv->net_dev;
+ struct dpni_pools_cfg pools_params = { 0 };
struct device *dev = net_dev->dev.parent;
- struct dpni_pools_cfg pools_params;
struct dpni_error_cfg err_cfg;
int err = 0;
int i;
pools_params.num_dpbp = 1;
- pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
@@ -3939,17 +4432,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
return -EINVAL;
}
if (err)
- return err;
+ goto out;
}
err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &priv->tx_qdid);
if (err) {
dev_err(dev, "dpni_get_qdid() failed\n");
- return err;
+ goto out;
}
return 0;
+
+out:
+ while (i--) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+ return err;
}
/* Allocate rings for storing incoming frame descriptors */
@@ -4100,6 +4601,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
return err;
}
+ dpaa2_eth_detect_features(priv);
+
/* Capabilities listing */
supported |= IFF_LIVE_ADDR_CHANGE;
@@ -4110,13 +4613,21 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->priv_flags |= supported;
net_dev->priv_flags &= ~not_supported;
+ net_dev->lltx = true;
/* Features */
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC;
+ NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
+ priv->dpni_attrs.num_queues <= 8)
+ net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -4149,15 +4660,24 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
- if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
+ netdev_dbg(priv->net_dev, "waiting for mac\n");
return PTR_ERR(dpmac_dev);
+ }
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4166,38 +4686,53 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- priv->mac = mac;
- if (dpaa2_eth_is_type_phy(priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
- if (err && err != -EPROBE_DEFER)
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
- ERR_PTR(err));
- if (err)
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ netdev_dbg(priv->net_dev,
+ "could not connect to MAC\n");
+ else
+ netdev_err(priv->net_dev,
+ "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
goto err_close_mac;
+ }
}
+ mutex_lock(&priv->mac_lock);
+ priv->mac = mac;
+ mutex_unlock(&priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- priv->mac = NULL;
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (dpaa2_eth_is_type_phy(priv))
- dpaa2_mac_disconnect(priv->mac);
+ struct dpaa2_mac *mac;
- if (!dpaa2_eth_has_mac(priv))
+ mutex_lock(&priv->mac_lock);
+ mac = priv->mac;
+ priv->mac = NULL;
+ mutex_unlock(&priv->mac_lock);
+
+ if (!mac)
return;
- dpaa2_mac_close(priv->mac);
- kfree(priv->mac);
- priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
@@ -4207,6 +4742,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ bool had_mac;
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -4223,12 +4759,15 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
dpaa2_eth_update_tx_fqids(priv);
- rtnl_lock();
- if (dpaa2_eth_has_mac(priv))
+ /* We can avoid locking because the "endpoint changed" IRQ
+ * handler is the only one who changes priv->mac at runtime,
+ * so we are not racing with anyone.
+ */
+ had_mac = !!priv->mac;
+ if (had_mac)
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
- rtnl_unlock();
}
return IRQ_HANDLED;
@@ -4246,7 +4785,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4812,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
@@ -4288,8 +4827,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
@@ -4304,6 +4842,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type == DPAA2_RX_FQ &&
+ xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq))
+ xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq);
+ }
+}
+
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
struct device *dev;
@@ -4325,20 +4874,23 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
+ SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
+
+ mutex_init(&priv->mac_lock);
priv->iommu_domain = iommu_get_domain_for_dev(dev);
priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
priv->rx_tstamp = false;
- priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
if (!priv->dpaa2_ptp_wq) {
err = -ENOMEM;
goto err_wq_alloc;
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4347,10 +4899,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "waiting for MC portal\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_err(dev, "MC portal allocation failed\n");
+ }
goto err_portal_alloc;
}
@@ -4365,7 +4919,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_eth_setup_fqs(priv);
- err = dpaa2_eth_setup_dpbp(priv);
+ err = dpaa2_eth_setup_default_dpbp(priv);
if (err)
goto err_dpbp_setup;
@@ -4397,6 +4951,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
@@ -4424,6 +4985,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
@@ -4436,10 +5001,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
- err = dpaa2_eth_connect_mac(priv);
- if (err)
- goto err_connect_mac;
-
err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4452,6 +5013,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_dl_port_add;
+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -4473,17 +5036,19 @@ err_dl_port_add:
err_dl_trap_register:
dpaa2_eth_dl_free(priv);
err_dl_register:
- dpaa2_eth_disconnect_mac(priv);
-err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
free_percpu(priv->percpu_extras);
@@ -4491,8 +5056,9 @@ err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
err_bind:
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
dpaa2_eth_free_dpio(priv);
err_dpio_setup:
@@ -4508,7 +5074,7 @@ err_wq_alloc:
return err;
}
-static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
struct device *dev;
struct net_device *net_dev;
@@ -4523,9 +5089,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
- rtnl_lock();
- dpaa2_eth_disconnect_mac(priv);
- rtnl_unlock();
unregister_netdev(net_dev);
@@ -4538,23 +5101,28 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
else
fsl_mc_free_irqs(ls_dev);
+ dpaa2_eth_disconnect_mac(priv);
dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_rx_xdp_rxq(priv);
+ dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
fsl_mc_portal_free(priv->mc_io);
- free_netdev(net_dev);
+ destroy_workqueue(priv->dpaa2_ptp_wq);
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
- return 0;
+ free_netdev(net_dev);
}
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
@@ -4569,7 +5137,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
static struct fsl_mc_driver dpaa2_eth_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = dpaa2_eth_probe,
.remove = dpaa2_eth_remove,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2085844227fe..834cba8c3a41 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#ifndef __DPAA2_ETH_H
@@ -12,6 +12,7 @@
#include <linux/fsl/mc.h>
#include <linux/net_tstamp.h>
#include <net/devlink.h>
+#include <net/xdp.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -53,6 +54,12 @@
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI
+
 /* Buffer quota per channel. We want to keep in check the number of ingress
  * frames in flight: for small-sized frames, congestion group taildrop may kick in
* first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -109,6 +116,14 @@
#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
#define DPAA2_ETH_RX_BUF_ALIGN 64
+/* The firmware allows assigning multiple buffer pools to a single DPNI -
+ * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
+ * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBP
+ * objects: the default one and 8 other distinct buffer pools, one per queue.
+ */
+#define DPAA2_ETH_DEFAULT_BP_IDX 0
+#define DPAA2_ETH_MAX_BPS 9
+
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
* options are either 0 or 64, so we choose the latter.
@@ -122,6 +137,8 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_XSK,
+ DPAA2_ETH_SWA_SW_TSO,
};
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +159,16 @@ struct dpaa2_eth_swa {
int dma_size;
struct xdp_frame *xdpf;
} xdp;
+ struct {
+ struct xdp_buff *xdp_buff;
+ int sgt_size;
+ } xsk;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
};
};
@@ -354,6 +381,8 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -388,6 +417,8 @@ struct dpaa2_eth_ch_stats {
__u64 bytes_per_cdan;
};
+#define DPAA2_ETH_CH_STATS 7
+
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
@@ -410,12 +441,19 @@ enum dpaa2_eth_fq_type {
};
struct dpaa2_eth_priv;
+struct dpaa2_eth_channel;
+struct dpaa2_eth_fq;
struct dpaa2_eth_xdp_fds {
struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
ssize_t num;
};
+typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -428,10 +466,7 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
- void (*consume)(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq);
+ dpaa2_eth_consume_cb_t *consume;
struct dpaa2_eth_fq_stats stats;
struct dpaa2_eth_xdp_fds xdp_redirect_fds;
@@ -443,6 +478,11 @@ struct dpaa2_eth_ch_xdp {
unsigned int res;
};
+struct dpaa2_eth_bp {
+ struct fsl_mc_device *dev;
+ int bpid;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -461,6 +501,11 @@ struct dpaa2_eth_channel {
/* Buffers to be recycled back in the buffer pool */
u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
int recycled_bufs_cnt;
+
+ bool xsk_zc;
+ int xsk_tx_pkts_sent;
+ struct xsk_buff_pool *xsk_pool;
+ struct dpaa2_eth_bp *bp;
};
struct dpaa2_eth_dist_fields {
@@ -491,8 +536,15 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 256
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -508,20 +560,25 @@ struct dpaa2_eth_priv {
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+ unsigned long features;
struct dpni_attr dpni_attrs;
u16 dpni_ver_major;
u16 dpni_ver_minor;
u16 tx_data_offset;
-
- struct fsl_mc_device *dpbp_dev;
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
u16 rx_buf_size;
- u16 bpid;
struct iommu_domain *iommu_domain;
enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
bool rx_tstamp; /* Rx timestamping enabled */
+ /* Buffer pool management */
+ struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
+ int num_bps;
+
u16 tx_qdid;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
@@ -559,6 +616,8 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ /* Serializes changes to priv->mac */
+ struct mutex mac_lock;
struct workqueue_struct *dpaa2_ptp_wq;
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
@@ -575,6 +634,8 @@ struct dpaa2_eth_priv {
struct devlink_port devlink_port;
u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
};
struct dpaa2_eth_devlink_priv {
@@ -653,6 +714,13 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
#define DPNI_PAUSE_VER_MAJOR 7
#define DPNI_PAUSE_VER_MINOR 13
#define dpaa2_eth_has_pause_support(priv) \
@@ -672,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
/* If we don't have an skb (e.g. XDP buffer), we only need space for
* the software annotation area
@@ -703,16 +771,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
{
- if (priv->mac &&
- (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
+ lockdep_assert_held(&priv->mac_lock);
- return false;
+ return dpaa2_mac_is_type_phy(priv->mac);
}
static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
{
+ lockdep_assert_held(&priv->mac_lock);
+
return priv->mac ? true : false;
}
@@ -741,4 +808,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
struct dpaa2_fapr *fapr);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index adb8ce5306ee..baab4f1c908d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- * Copyright 2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/net_tstamp.h>
@@ -44,6 +43,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] tx converted sg frames",
@@ -84,11 +85,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_nway_reset(priv->mac->phylink);
+ err = phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ mutex_unlock(&priv->mac_lock);
- return -EOPNOTSUPP;
+ return err;
}
static int
@@ -96,10 +102,18 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_ksettings_get(priv->mac->phylink,
- link_settings);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
@@ -114,11 +128,17 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
- if (!dpaa2_eth_is_type_phy(priv))
- return -ENOTSUPP;
+ if (dpaa2_eth_is_type_phy(priv))
+ err = phylink_ethtool_ksettings_set(priv->mac->phylink,
+ link_settings);
- return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+ mutex_unlock(&priv->mac_lock);
+
+ return err;
}
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
@@ -127,11 +147,16 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ mutex_unlock(&priv->mac_lock);
return;
}
+ mutex_unlock(&priv->mac_lock);
+
pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
@@ -150,9 +175,17 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_set_pauseparam(priv->mac->phylink,
- pause);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
+
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -184,36 +217,25 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct dpaa2_eth_priv *priv = netdev_priv(netdev);
- u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
- strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
- strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- if (dpaa2_eth_has_mac(priv))
- dpaa2_mac_get_strings(p);
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_stats[i]);
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++)
+ ethtool_puts(&data, dpaa2_ethtool_extras[i]);
+ dpaa2_mac_get_strings(&data);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
- int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (dpaa2_eth_has_mac(priv))
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
+ dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -225,17 +247,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- int i = 0;
- int j, k, err;
- int num_cnt;
- union dpni_statistics dpni_stats;
- u32 fcnt, bcnt;
- u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
- u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
- u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_eth_drv_stats *extras;
- struct dpaa2_eth_ch_stats *ch_stats;
+ union dpni_statistics dpni_stats;
int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
sizeof(dpni_stats.page_0),
sizeof(dpni_stats.page_1),
@@ -245,6 +258,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
sizeof(dpni_stats.page_5),
sizeof(dpni_stats.page_6),
};
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ struct dpaa2_eth_ch_stats *ch_stats;
+ struct dpaa2_eth_drv_stats *extras;
+ u32 buf_cnt, buf_cnt_total = 0;
+ int j, k, err, num_cnt, i = 0;
+ u32 fcnt, bcnt;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
@@ -278,7 +298,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
@@ -306,15 +326,22 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = fcnt_tx_total;
*(data + i++) = bcnt_tx_total;
- err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
- if (err) {
- netdev_warn(net_dev, "Buffer count query error %d\n", err);
- return;
+ for (j = 0; j < priv->num_bps; j++) {
+ err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ buf_cnt_total += buf_cnt;
}
- *(data + i++) = buf_cnt;
+ *(data + i++) = buf_cnt_total;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
+
+ mutex_unlock(&priv->mac_lock);
}
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
@@ -684,6 +711,13 @@ static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
return 0;
}
+static u32 dpaa2_eth_get_rx_ring_count(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ return dpaa2_eth_queue_count(priv);
+}
+
static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
@@ -692,16 +726,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
- case ETHTOOL_GRXRINGS:
- rxnfc->data = dpaa2_eth_queue_count(priv);
- break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
@@ -740,11 +764,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -758,11 +777,33 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
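
These two handlers pick up the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases deleted from the rxnfc callbacks above: the ethtool core now routes flow-hash get/set through the dedicated .get_rxfh_fields/.set_rxfh_fields ops wired up at the bottom of this file, so the behavior is unchanged: a single hash-field set for all flow types, with the set path still rejecting anything outside DPAA2_RXH_SUPPORTED.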
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
if (!dpaa2_ptp)
return ethtool_op_get_ts_info(dev, info);
@@ -874,6 +915,29 @@ restore_rx_usecs:
return err;
}
+static void dpaa2_eth_get_channels(struct net_device *net_dev,
+ struct ethtool_channels *channels)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int queue_count = dpaa2_eth_queue_count(priv);
+
+ channels->max_rx = queue_count;
+ channels->max_tx = queue_count;
+ channels->rx_count = queue_count;
+ channels->tx_count = queue_count;
+
+ /* Tx confirmation and Rx error */
+ channels->max_other = queue_count + 1;
+ channels->max_combined = channels->max_rx +
+ channels->max_tx +
+ channels->max_other;
+ /* Tx conf and Rx err */
+ channels->other_count = queue_count + 1;
+ channels->combined_count = channels->rx_count +
+ channels->tx_count +
+ channels->other_count;
+}
+
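
As a worked example, on a DPNI with 8 Rx/Tx queue pairs this reports rx_count = tx_count = max_rx = max_tx = 8, other_count = max_other = 9 (eight Tx-confirmation queues plus the single Rx-error queue), and combined_count = 8 + 8 + 9 = 25.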
const struct ethtool_ops dpaa2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -889,9 +953,13 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rx_ring_count = dpaa2_eth_get_rx_ring_count,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
.get_coalesce = dpaa2_eth_get_coalesce,
.set_coalesce = dpaa2_eth_set_coalesce,
+ .get_channels = dpaa2_eth_get_channels,
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ef8f0a055024..422ce13a7c94 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -2,6 +2,8 @@
/* Copyright 2019 NXP */
#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -10,6 +12,28 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
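
The helper follows memcmp conventions: the result is non-negative exactly when the firmware version is at least the one requested. A worked illustration (the firmware versions are hypothetical):

    /*
     * Editor's example: MC firmware 4.9 against the 4.8 requirement above
     * yields 9 - 8 = 1, so DPAA2_MAC_FEATURE_PROTOCOL_CHANGE is set;
     * firmware 3.15 would yield 3 - 4 = -1 and leave the feature clear.
     */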
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
*if_mode = PHY_INTERFACE_MODE_NA;
@@ -30,6 +54,9 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
case DPMAC_ETH_IF_XFI:
*if_mode = PHY_INTERFACE_MODE_10GBASER;
break;
+ case DPMAC_ETH_IF_CAUI:
+ *if_mode = PHY_INTERFACE_MODE_25GBASER;
+ break;
default:
return -EINVAL;
}
@@ -37,10 +64,35 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ case PHY_INTERFACE_MODE_25GBASER:
+ return DPMAC_ETH_IF_CAUI;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
- struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
struct device_node *dpmacs = NULL;
int err;
u32 id;
@@ -53,6 +105,13 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
parent = of_fwnode_handle(dpmacs);
} else if (is_acpi_node(fwnode)) {
parent = fwnode;
+ } else {
+ /* The root dprc device has not yet finished probing, so the
+ * fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ dev_dbg(dev, "dprc not finished probing\n");
+ return ERR_PTR(-EPROBE_DEFER);
}
fwnode_for_each_child_node(parent, child) {
@@ -90,86 +149,12 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
-static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
- phy_interface_t interface)
-{
- switch (interface) {
- /* We can switch between SGMII and 1000BASE-X at runtime with
- * pcs-lynx
- */
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (mac->pcs &&
- (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
- mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
- return false;
- return interface != mac->if_mode;
-
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (interface != mac->if_mode);
- default:
- return true;
- }
-}
-
-static void dpaa2_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
- goto empty_set;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- phylink_set_10g_modes(mask);
- if (state->interface == PHY_INTERFACE_MODE_10GBASER)
- break;
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseT_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- break;
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-empty_set:
- linkmode_zero(supported);
+ return mac->pcs;
}
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
@@ -179,7 +164,8 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
- if (state->an_enabled)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising))
dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
else
dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
@@ -189,6 +175,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
if (err)
netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
__func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
}
static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -243,7 +242,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = dpaa2_mac_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -253,8 +252,8 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct fwnode_handle *dpmac_node,
int id)
{
- struct mdio_device *mdiodev;
struct fwnode_handle *node;
+ struct phylink_pcs *pcs;
node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
if (IS_ERR(node)) {
@@ -263,43 +262,108 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
return 0;
}
- if (!fwnode_device_is_available(node)) {
- netdev_err(mac->net_dev, "pcs-handle node not available\n");
- fwnode_handle_put(node);
- return -ENODEV;
- }
-
- mdiodev = fwnode_mdio_find_device(node);
+ pcs = lynx_pcs_create_fwnode(node);
fwnode_handle_put(node);
- if (!mdiodev)
+
+ if (pcs == ERR_PTR(-EPROBE_DEFER)) {
+ netdev_dbg(mac->net_dev, "missing PCS device\n");
return -EPROBE_DEFER;
+ }
+
+ if (pcs == ERR_PTR(-ENODEV)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ return PTR_ERR(pcs);
+ }
- mac->pcs = lynx_pcs_create(mdiodev);
- if (!mac->pcs) {
- netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
- put_device(&mdiodev->dev);
- return -ENOMEM;
+ if (IS_ERR(pcs)) {
+ netdev_err(mac->net_dev,
+ "lynx_pcs_create_fwnode() failed: %pe\n", pcs);
+ return PTR_ERR(pcs);
}
+ mac->pcs = pcs;
+
return 0;
}
static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
{
- struct lynx_pcs *pcs = mac->pcs;
+ struct phylink_pcs *phylink_pcs = mac->pcs;
- if (pcs) {
- struct device *dev = &pcs->mdio->dev;
- lynx_pcs_destroy(pcs);
- put_device(dev);
+ if (phylink_pcs) {
+ lynx_pcs_destroy(phylink_pcs);
mac->pcs = NULL;
}
}
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+ /* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+ /* If we have access to the SerDes phy/lane, ask the SerDes
+ * driver what interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ ASSERT_RTNL();
+
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+
+ phylink_start(mac->phylink);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ ASSERT_RTNL();
+
+ phylink_stop(mac->phylink);
+
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
struct phylink *phylink;
int err;
@@ -316,6 +380,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return -EINVAL;
mac->if_mode = err;
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
@@ -336,9 +414,16 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return err;
}
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD;
+
+ dpaa2_mac_set_supported_interfaces(mac);
+
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
@@ -348,10 +433,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
- if (mac->pcs)
- phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
-
+ rtnl_lock();
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
+ rtnl_unlock();
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
goto err_phylink_destroy;
@@ -369,18 +453,21 @@ err_pcs_destroy:
void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
{
- if (!mac->phylink)
- return;
-
+ rtnl_lock();
phylink_disconnect_phy(mac->phylink);
+ rtnl_unlock();
+
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
}
int dpaa2_mac_open(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
int err;
err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
@@ -397,10 +484,24 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
- mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
net_dev->dev.of_node = to_of_node(mac->fw_node);
return 0;
@@ -457,15 +558,12 @@ int dpaa2_mac_get_sset_count(void)
return DPAA2_MAC_NUM_STATS;
}
-void dpaa2_mac_get_strings(u8 *data)
+void dpaa2_mac_get_strings(u8 **data)
{
- u8 *p = data;
int i;
- for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < DPAA2_MAC_NUM_STATS; i++)
+ ethtool_puts(data, dpaa2_mac_ethtool_stats[i]);
}
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 7842cbb2207a..53f8d106d11e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,7 +7,6 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
-#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -18,17 +17,27 @@ struct dpaa2_mac {
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
struct phylink_config phylink_config;
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io);
+static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
+{
+ if (!mac)
+ return false;
+
+ return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
+}
int dpaa2_mac_open(struct dpaa2_mac *mac);
@@ -40,8 +49,12 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
int dpaa2_mac_get_sset_count(void);
-void dpaa2_mac_get_strings(u8 *data);
+void dpaa2_mac_get_strings(u8 **data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..4497e3c0456d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/msi.h>
#include <linux/fsl/mc.h>
#include "dpaa2-ptp.h"
@@ -129,7 +128,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -168,7 +166,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
base = of_iomap(node, 0);
if (!base) {
err = -ENOMEM;
- goto err_close;
+ goto err_put;
}
err = fsl_mc_allocate_irqs(mc_dev);
@@ -177,8 +175,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
@@ -212,6 +209,8 @@ err_free_mc_irq:
fsl_mc_free_irqs(mc_dev);
err_unmap:
iounmap(base);
+err_put:
+ of_node_put(node);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
@@ -220,7 +219,7 @@ err_exit:
return err;
}
-static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
+static void dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
struct ptp_qoriq *ptp_qoriq;
@@ -233,8 +232,6 @@ static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
fsl_mc_free_irqs(mc_dev);
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
fsl_mc_portal_free(mc_dev->mc_io);
-
- return 0;
}
static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 720c9230cab5..a888f6e6e9b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state = {0};
- int err = 0;
+ int err;
+
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
- link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
@@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
bool if_running;
int err = 0, ret;
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
- link_ksettings);
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&port_priv->mac_lock);
/* Interface needs to be down to change link settings */
if_running = netif_running(netdev);
@@ -145,14 +159,9 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
static int
dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
-
switch (sset) {
case ETH_SS_STATS:
- if (port_priv->mac)
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -161,19 +170,16 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- u8 *p = data;
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
- memcpy(p, dpaa2_switch_ethtool_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
+ str = dpaa2_switch_ethtool_counters[i].name;
+ ethtool_puts(&data, str);
}
- if (port_priv->mac)
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(&data);
break;
}
}
@@ -196,8 +202,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
dpaa2_switch_ethtool_counters[i].name, err);
}
- if (port_priv->mac)
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_has_mac(port_priv))
dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+
+ mutex_unlock(&port_priv->mac_lock);
}
const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..701a87370737 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -17,14 +17,14 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
struct dpsw_acl_fields *acl_h, *acl_m;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
@@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
acl_h = &acl_key->match;
acl_m = &acl_key->mask;
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -132,16 +135,19 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
return -EFAULT;
}
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
filter_block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+ kfree(cmd_buff);
return err;
}
@@ -172,16 +178,18 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
return -EFAULT;
}
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
block->acl_id, acl_entry_cfg);
- dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
return err;
}
@@ -532,16 +540,20 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(extack,
"Mirroring is supported only per VLAN");
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
@@ -561,9 +573,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index d039457928b0..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
@@ -290,7 +289,7 @@ static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
int err;
if (port_priv->vlans[vid]) {
- netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ netdev_err(netdev, "VLAN %d already configured\n", vid);
return -EEXIST;
}
@@ -394,7 +393,8 @@ static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
ppriv_local = ethsw->ports[i];
- ppriv_local->vlans[vid] = 0;
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
}
return 0;
@@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
return err;
}
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
@@ -602,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to port_priv->mac), so we cannot race with it.
*/
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_mac_is_type_phy(port_priv->mac))
return 0;
/* Interrupts are received even though no one issued an 'ifconfig up'
@@ -683,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (!dpaa2_switch_port_is_type_phy(port_priv)) {
/* Explicitly set carrier off, otherwise
* netif_carrier_ok() will return true and cause 'ip link show'
@@ -696,6 +701,7 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
if (err) {
+ mutex_unlock(&port_priv->mac_lock);
netdev_err(netdev, "dpsw_if_enable err %d\n", err);
return err;
}
@@ -703,7 +709,9 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
dpaa2_switch_enable_ctrl_if_napi(ethsw);
if (dpaa2_switch_port_is_type_phy(port_priv))
- phylink_start(port_priv->mac->phylink);
+ dpaa2_mac_start(port_priv->mac);
+
+ mutex_unlock(&port_priv->mac_lock);
return 0;
}
@@ -714,13 +722,17 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (dpaa2_switch_port_is_type_phy(port_priv)) {
- phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
}
+ mutex_unlock(&port_priv->mac_lock);
+
err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
@@ -768,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -1435,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1449,9 +1469,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- port_priv->mac = mac;
- if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
if (err) {
netdev_err(port_priv->netdev,
@@ -1461,27 +1480,38 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
}
}
+ mutex_lock(&port_priv->mac_lock);
+ port_priv->mac = mac;
+ mutex_unlock(&port_priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- port_priv->mac = NULL;
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
- if (dpaa2_switch_port_is_type_phy(port_priv))
- dpaa2_mac_disconnect(port_priv->mac);
+ struct dpaa2_mac *mac;
+
+ mutex_lock(&port_priv->mac_lock);
+ mac = port_priv->mac;
+ port_priv->mac = NULL;
+ mutex_unlock(&port_priv->mac_lock);
- if (!dpaa2_switch_port_has_mac(port_priv))
+ if (!mac)
return;
- dpaa2_mac_close(port_priv->mac);
- kfree(port_priv->mac);
- port_priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
@@ -1489,8 +1519,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
struct device *dev = (struct device *)arg;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct ethsw_port_priv *port_priv;
- u32 status = ~0;
int err, if_id;
+ bool had_mac;
+ u32 status;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
@@ -1502,34 +1533,36 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
if_id = (status & 0xFFFF0000) >> 16;
port_priv = ethsw->ports[if_id];
- if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
dpaa2_switch_port_link_state_update(port_priv->netdev);
- dpaa2_switch_port_set_mac_addr(port_priv);
- }
if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
- rtnl_lock();
- if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_switch_port_set_mac_addr(port_priv);
+ /* We can avoid locking because the "endpoint changed" IRQ
+ * handler is the only one that changes priv->mac at runtime,
+ * so we are not racing with anyone.
+ */
+ had_mac = !!port_priv->mac;
+ if (had_mac)
dpaa2_switch_port_disconnect_mac(port_priv);
else
dpaa2_switch_port_connect_mac(port_priv);
- rtnl_unlock();
}
-out:
err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, status);
if (err)
dev_err(dev, "Can't clear irq status (err %d)\n", err);
+out:
return IRQ_HANDLED;
}
static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
- u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
struct fsl_mc_device_irq *irq;
int err;
@@ -1553,8 +1586,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1580,7 +1612,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
@@ -1752,8 +1784,10 @@ int dpaa2_switch_port_vlans_add(struct net_device *netdev,
/* Make sure that the VLAN is not already configured
* on the switch port
*/
- if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) {
+ netdev_err(netdev, "VLAN %d already configured\n", vlan->vid);
return -EEXIST;
+ }
/* Check if there is space for a new VLAN */
err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
@@ -1896,9 +1930,11 @@ static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid
/* Delete VLAN from switch if it is no longer configured on
* any port
*/
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
return 0; /* Found a port member in VID */
+ }
ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
@@ -1973,33 +2009,16 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
return notifier_from_errno(err);
}
-static struct notifier_block dpaa2_switch_port_switchdev_nb;
-static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
-
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct ethsw_port_priv *other_port_priv;
- struct net_device *other_dev;
- struct list_head *iter;
bool learn_ena;
int err;
- netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
- if (!dpaa2_switch_port_dev_check(other_dev))
- continue;
-
- other_port_priv = netdev_priv(other_dev);
- if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- NL_SET_ERR_MSG_MOD(extack,
- "Interface from a different DPSW is in the bridge already");
- return -EINVAL;
- }
- }
-
/* Delete the previously manually installed VLAN 1 */
err = dpaa2_switch_port_del_vlan(port_priv, 1);
if (err)
@@ -2017,10 +2036,13 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
if (err)
goto err_egress_flood;
+ /* Recreate the egress flood domain of the FDB that we just left. */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb,
- false, extack);
+ NULL, NULL, false, extack);
if (err)
goto err_switchdev_offload;
@@ -2054,9 +2076,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo
static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
- switchdev_bridge_port_unoffload(netdev, NULL,
- &dpaa2_switch_port_switchdev_nb,
- &dpaa2_switch_port_switchdev_blocking_nb);
+ switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL);
}
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
@@ -2137,6 +2157,10 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
int err;
if (!br_vlan_enabled(upper_dev)) {
@@ -2151,54 +2175,93 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
return 0;
}
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
-static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_prechangeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
- int err = 0;
+ int err;
if (!dpaa2_switch_port_dev_check(netdev))
- return NOTIFY_DONE;
+ return 0;
extack = netdev_notifier_info_to_extack(&info->info);
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
- break;
-
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
upper_dev,
extack);
if (err)
- goto out;
+ return err;
if (!info->linking)
dpaa2_switch_port_pre_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_changeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ return dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
+ else
+ return dpaa2_switch_port_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = dpaa2_switch_port_prechangeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev,
- upper_dev,
- extack);
- else
- err = dpaa2_switch_port_bridge_leave(netdev);
- }
+ err = dpaa2_switch_port_changeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
break;
}
-out:
- return notifier_from_errno(err);
+ return NOTIFY_DONE;
}
struct ethsw_switchdev_event_work {
@@ -2585,13 +2648,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
- int *count, i;
+ int *count, ret, i;
for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
count = &ethsw->buf_count;
- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ *count += ret;
- if (unlikely(*count < BUFS_PER_CMD))
+ if (unlikely(ret < BUFS_PER_CMD))
return -ENOMEM;
}
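
(The switch to checking the per-call return value is the actual fix: the old code compared the running total against BUFS_PER_CMD, so once the first iteration succeeded the total could never fall below the threshold again, and a later partial refill went undetected.)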
@@ -2672,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
@@ -2930,9 +2994,7 @@ static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
{
struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
- rtnl_lock();
dpaa2_switch_port_disconnect_mac(port_priv);
- rtnl_unlock();
free_netdev(port_priv->netdev);
ethsw->ports[port_idx] = NULL;
}
@@ -3198,7 +3260,7 @@ static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
dev_warn(dev, "dpsw_close err %d\n", err);
}
-static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
struct ethsw_core *ethsw;
@@ -3229,8 +3291,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw);
dev_set_drvdata(dev, NULL);
-
- return 0;
}
static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
@@ -3251,6 +3311,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_priv->netdev = port_netdev;
port_priv->ethsw_data = ethsw;
+ mutex_init(&port_priv->mac_lock);
+
port_priv->idx = port_idx;
port_priv->stp_state = BR_STATE_FORWARDING;
@@ -3278,6 +3340,7 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_FILTER |
NETIF_F_HW_TC;
+ port_netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
err = dpaa2_switch_port_init(port_priv, port_idx);
if (err)
@@ -3368,9 +3431,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch ports.
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
@@ -3435,7 +3497,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
static struct fsl_mc_driver dpaa2_switch_drv = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = dpaa2_switch_probe,
.remove = dpaa2_switch_remove,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
index 0002dca4d417..42b3ca73f55d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -161,6 +161,8 @@ struct ethsw_port_priv {
struct dpaa2_switch_filter_block *filter_block;
struct dpaa2_mac *mac;
+ /* Protects against changes to port_priv->mac */
+ struct mutex mac_lock;
};
/* Switch data */
@@ -230,12 +232,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
static inline bool
dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
{
- if (port_priv->mac &&
- (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
-
- return false;
+ return dpaa2_mac_is_type_phy(port_priv->mac);
}
static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
new file mode 100644
index 000000000000..4b0ae7d9af92
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2022 NXP
+ */
+#include <linux/filter.h>
+#include <linux/compiler.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+#include "dpaa2-eth.h"
+
+static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ enum dpaa2_eth_fq_type type,
+ dpaa2_eth_consume_cb_t *consume)
+{
+ struct dpaa2_eth_fq *fq;
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+
+ if (fq->type != type)
+ continue;
+ if (fq->channel != ch)
+ continue;
+
+ fq->consume = consume;
+ }
+}
+
+static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff *xdp_buff;
+ struct dpaa2_eth_swa *swa;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
+ ch->xsk_pool->umem->headroom);
+ xdp_buff = swa->xsk.xdp_buff;
+
+ xdp_buff->data_hard_start = vaddr;
+ xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd);
+ xdp_set_data_meta_invalid(xdp_buff);
+ xdp_buff->rxq = &ch->xdp_rxq;
+
+ xsk_buff_dma_sync_for_cpu(xdp_buff);
+ xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
+ dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data);
+
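+ /* XDP_REDIRECT is the expected verdict on the AF_XDP fast path, so
+ * handle it first, before the switch on the other actions.
+ */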
+ if (likely(xdp_act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog);
+ if (unlikely(err)) {
+ ch->stats.xdp_drop++;
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ } else {
+ ch->buf_count--;
+ ch->stats.xdp_redirect++;
+ }
+
+ goto xdp_redir;
+ }
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+xdp_redir:
+ ch->xdp.res |= xdp_act;
+out:
+ return xdp_act;
+}
+
+/* Rx frame processing routine for the AF_XDP fast path */
+static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb;
+ void *vaddr;
+ u32 xdp_act;
+
+ trace_dpaa2_rx_xsk_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ if (fd_format != dpaa2_fd_single) {
+ WARN_ON(priv->xdp_prog);
+ /* AF_XDP doesn't support any other formats */
+ goto err_frame_format;
+ }
+
+ xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ /* Build skb */
+ skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
+ if (!skb)
+ /* Nothing else we can do, recycle the buffer and
+ * drop the frame.
+ */
+ goto err_alloc_skb;
+
+ /* Send the skb to the Linux networking stack */
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+ return;
+
+err_alloc_skb:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
+ struct dpni_pools_cfg *pools_params)
+{
+ int curr_bp = 0, i, j;
+
+ pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
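+ /* For each buffer pool, build a priority_mask in which bit j is set
+ * when channel/queue j draws its buffers from this pool; pools that
+ * no channel uses are skipped.
+ */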
+ for (i = 0; i < priv->num_bps; i++) {
+ for (j = 0; j < priv->num_channels; j++)
+ if (priv->bp[i] == priv->channel[j]->bp)
+ pools_params->pools[curr_bp].priority_mask |= (1 << j);
+ if (!pools_params->pools[curr_bp].priority_mask)
+ continue;
+
+ pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
+ pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
+ pools_params->pools[curr_bp++].backup_pool = 0;
+ }
+ pools_params->num_dpbp = curr_bp;
+}
+
+static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err;
+ bool up;
+
+ ch = priv->channel[qid];
+ if (!ch->xsk_pool)
+ return -EINVAL;
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ xsk_pool_dma_unmap(pool, 0);
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err)
+ netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed (err = %d)\n",
+ err);
+
+ dpaa2_eth_free_dpbp(priv, ch->bp);
+
+ ch->xsk_zc = false;
+ ch->xsk_pool = NULL;
+ ch->xsk_tx_pkts_sent = 0;
+ ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err)
+ netdev_err(dev, "dpni_set_pools() failed\n");
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_xsk_enable_pool(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err, err2;
+ bool up;
+
+ if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
+ netdev_err(dev, "AF_XDP zero-copy not supported on devices <= WRIOP(3, 0, 0)\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->dpni_attrs.num_queues > 8) {
+ netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more then 8 queues\n");
+ return -EOPNOTSUPP;
+ }
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
+ if (err) {
+ netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
+ err);
+ goto err_dma_unmap;
+ }
+
+ ch = priv->channel[qid];
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err) {
+ netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
+ goto err_mem_model;
+ }
+ xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);
+
+ priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(priv->bp[priv->num_bps])) {
+ err = PTR_ERR(priv->bp[priv->num_bps]);
+ goto err_bp_alloc;
+ }
+ ch->xsk_zc = true;
+ ch->xsk_pool = pool;
+ ch->bp = priv->bp[priv->num_bps++];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ netdev_err(dev, "dpni_set_pools() failed\n");
+ goto err_set_pools;
+ }
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+err_set_pools:
+ err2 = dpaa2_xsk_disable_pool(dev, qid);
+ if (err2)
+ netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2);
+err_bp_alloc:
+ err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err2)
+ netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed with %d)\n", err2);
+err_mem_model:
+ xsk_pool_dma_unmap(pool, 0);
+err_dma_unmap:
+ if (up)
+ dev_open(dev, NULL);
+
+ return err;
+}
+
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
+{
+ return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
+ dpaa2_xsk_disable_pool(dev, qid);
+}
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch = priv->channel[qid];
+
+ if (!priv->link_state.up)
+ return -ENETDOWN;
+
+ if (!priv->xdp_prog)
+ return -EINVAL;
+
+ if (!ch->xsk_zc)
+ return -EINVAL;
+
+ /* We do not have access to a per channel SW interrupt, so instead we
+ * schedule a NAPI instance.
+ */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
+
+ return 0;
+}
+
+static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ struct xdp_desc *xdp_desc)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ void *sgt_buf = NULL;
+ dma_addr_t sgt_addr;
+ int sgt_buf_size;
+ dma_addr_t addr;
+ int err = 0;
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf))
+ return -ENOMEM;
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Get the address of the XSK Tx buffer */
+ addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
+ xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);
+
+ /* Fill in the HW SGT structure */
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, xdp_desc->len);
+ dpaa2_sg_set_final(sgt, true);
+
+ /* Store the necessary info in the SGT buffer */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_XSK;
+ swa->xsk.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ err = -ENOMEM;
+ goto sgt_map_failed;
+ }
+
+ /* Initialize FD fields */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, xdp_desc->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+sgt_map_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+ return err;
+}
+
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
+{
+ struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
+ int total_enqueued, enqueued;
+ int retries, max_retries;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int batch, i, err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fds = (this_cpu_ptr(priv->fd))->array;
+
+ /* Use the FQ with the same idx as the affine CPU */
+ fq = &priv->fq[ch->nctx.desired_cpu];
+
+ batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
+ if (!batch)
+ return false;
+
+ /* Create a FD for each XSK frame to be sent */
+ for (i = 0; i < batch; i++) {
+ err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
+ if (err) {
+ batch = i;
+ break;
+ }
+
+ trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]);
+ }
+
+ /* Enqueue all the created FDs */
+ max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
+ total_enqueued = 0;
+ enqueued = 0;
+ retries = 0;
+ while (total_enqueued < batch && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
+ batch - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
+ }
+ percpu_extras->tx_portal_busy += retries;
+
+ /* Update statistics */
+ percpu_stats->tx_packets += total_enqueued;
+ for (i = 0; i < total_enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = total_enqueued; i < batch; i++) {
+ dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
+ percpu_stats->tx_errors++;
+ }
+
+ return total_enqueued == budget;
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index a24b20f76938..e9ac2ecef3be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -19,11 +19,15 @@
#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
__le64 counter;
};
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index d5997b654562..f440a4c3b70c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
return 0;
}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of the Data Path MAC API
+ * @minor_ver: Minor version of the Data Path MAC API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol in which the DPMAC is to be reconfigured.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
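+
+/* Illustrative usage (not part of this patch): query the DPMAC API
+ * version before issuing the newer SET_PROTOCOL command. The minimum
+ * version checked below is an assumption, as is the choice of
+ * DPMAC_ETH_IF_XFI from enum dpmac_eth_if:
+ *
+ *	u16 major, minor;
+ *	int err;
+ *
+ *	err = dpmac_get_api_version(mc_io, 0, &major, &minor);
+ *	if (!err && major >= 4)
+ *		err = dpmac_set_protocol(mc_io, 0, token,
+ *					 DPMAC_ETH_IF_XFI);
+ */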
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 8f7ceb731282..17488819ef68 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
enum dpmac_counter_id id, u64 *value);
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 9f80bdfeedec..be9492b8d5dc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -13,10 +13,12 @@
#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_2ND_VERSION 2
+#define DPNI_CMD_3RD_VERSION 3
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
@@ -39,7 +41,7 @@
#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
-#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200)
#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
@@ -98,7 +100,7 @@
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
-#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -115,14 +117,19 @@ struct dpni_cmd_open {
};
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+
+struct dpni_cmd_pool {
+ __le16 dpbp_id;
+ u8 priority_mask;
+ u8 pad;
+};
+
struct dpni_cmd_set_pools {
- /* cmd word 0 */
u8 num_dpbp;
u8 backup_pool_mask;
- __le16 pad;
- /* cmd word 0..4 */
- __le32 dpbp_id[DPNI_MAX_DPBP];
- /* cmd word 4..6 */
+ u8 pad;
+ u8 pool_options;
+ struct dpni_cmd_pool pool[DPNI_MAX_DPBP];
__le16 buffer_size[DPNI_MAX_DPBP];
};
@@ -658,12 +665,16 @@ struct dpni_cmd_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_rsp_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..02601a283b59 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
+ cmd_params->pool_options = cfg->pool_options;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
- cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
cmd_params->buffer_size[i] =
cpu_to_le16(cfg->pools[i].buffer_size);
cmd_params->backup_pool_mask |=
@@ -2136,6 +2140,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
PTP_CH_UPDATE) ? 1 : 0;
ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..5c0a1d5ac934 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+#define DPNI_POOL_ASSOC_QPRI 0
+#define DPNI_POOL_ASSOC_QDBIN 1
+
/**
* struct dpni_pools_cfg - Structure representing buffer pools configuration
* @num_dpbp: Number of DPBPs
+ * @pool_options: Buffer assignment options;
+ * this field is a combination of the DPNI_POOL_ASSOC_* flags
* @pools: Array of buffer pools parameters; The number of valid entries
* must match 'num_dpbp' value
* @pools.dpbp_id: DPBP object ID
+ * @pools.priority_mask: Priority mask that indicates the TCs used with this
+ * buffer pool. If set to 0x00, the MC will assume the value 0xff.
* @pools.buffer_size: Buffer size
* @pools.backup_pool: Backup pool
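+ *
+ * Illustrative example (not part of this patch; the local variable
+ * names are assumptions): one pool shared by the first four queues
+ * via QDBIN association:
+ *
+ *	struct dpni_pools_cfg cfg = { 0 };
+ *
+ *	cfg.num_dpbp = 1;
+ *	cfg.pool_options = DPNI_POOL_ASSOC_QDBIN;
+ *	cfg.pools[0].dpbp_id = dpbp_id;
+ *	cfg.pools[0].priority_mask = 0x0f;	/* queues 0..3 */
+ *	cfg.pools[0].buffer_size = rx_buf_size;
+ *	err = dpni_set_pools(mc_io, 0, token, &cfg);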
*/
struct dpni_pools_cfg {
u8 num_dpbp;
+ u8 pool_options;
struct {
int dpbp_id;
+ u8 priority_mask;
u16 buffer_size;
int backup_pool;
} pools[DPNI_MAX_DPBP];
@@ -1074,12 +1083,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
* @peer_delay: For peer-to-peer transparent clocks add this value to the
* correction field in addition to the transient time update.
* The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ * is used to update the register contents directly; the user has to
+ * create an address mapping for it.
*/
struct dpni_single_step_cfg {
u8 en;
u8 ch_update;
u16 offset;
u32 peer_delay;
+ u32 ptp_onestep_reg_base;
};
int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index cdc0ff89388a..117038104b69 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,9 +1,39 @@
# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC_CORE
+ tristate
+ select NXP_NETC_LIB if NXP_NTMP
+ help
+ This module supports common functionality between the PF and VF
+ drivers for the NXP ENETC controller.
+
+ If compiled as module (M), the module name is fsl-enetc-core.
+
+config NXP_ENETC_PF_COMMON
+ tristate
+ help
+ This module supports common functionality between drivers of
+ different versions of NXP ENETC PF controllers.
+
+ If compiled as module (M), the module name is nxp-enetc-pf-common.
+
+config NXP_NETC_LIB
+ tristate
+ help
+ This module provides common functionality for both the ENETC and NETC
+ Switch drivers, such as the NETC Table Management Protocol (NTMP) 2.0
+ and common tc flower and debugfs interfaces.
+
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
select PHYLINK
select PCS_LYNX
select DIMLIB
@@ -14,9 +44,29 @@ config FSL_ENETC
If compiled as module (M), the module name is fsl-enetc.
+config NXP_ENETC4
+ tristate "ENETC4 PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
+ select FSL_ENETC_MDIO
+ select NXP_ENETC_PF_COMMON
+ select NXP_NTMP
+ select PHYLINK
+ select DIMLIB
+ help
+ This driver supports NXP ENETC devices with major revision 4. ENETC
+ provides the NIC functionality in NETC; it supports
+ virtualization/isolation based on PCIe Single Root I/O Virtualization
+ (SR-IOV) and a full range of TSN standards and NIC offload
+ capabilities.
+
+ If compiled as module (M), the module name is nxp-enetc4.
+
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
@@ -36,7 +86,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
@@ -64,3 +114,17 @@ config FSL_ENETC_QOS
enable/disable from user space via QoS commands (tc). On the kernel
side, it can be loaded by the QoS driver. Currently, only taprio
(802.1Qbv) and Credit Based Shaper (802.1Qav) are supported.
+
+config NXP_NETC_BLK_CTRL
+ tristate "NETC blocks control driver"
+ help
+ This driver configures Integrated Endpoint Register Block (IERB) and
+ Privileged Register Block (PRB) of NETC. For i.MX platforms, it also
+ includes the configuration of NETCMIX block.
+ The IERB contains registers that are used for pre-boot initialization,
+ debug, and non-customer configuration. The PRB controls global reset
+ and global error handling for NETC. The NETCMIX block is mainly used
+ to set the MII and PCS protocols of the links; it also contains
+ settings for some other functions.
+
+ If compiled as module (M), the module name is nxp-netc-blk-ctrl.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,15 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
-common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
+fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
+
+obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
+nxp-enetc-pf-common-y := enetc_pf_common.o
+
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
+nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
+
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
-fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+fsl-enetc-vf-y := enetc_vf.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
@@ -19,3 +29,6 @@ fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
fsl-enetc-ptp-y := enetc_ptp.o
+
+obj-$(CONFIG_NXP_NETC_BLK_CTRL) += nxp-netc-blk-ctrl.o
+nxp-netc-blk-ctrl-y := netc_blk_ctrl.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 504e12554079..d5e5800b84ef 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3,6 +3,7 @@
#include "enetc.h"
#include <linux/bpf_trace.h>
+#include <linux/clk.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
@@ -11,14 +12,81 @@
#include <net/pkt_sched.h>
#include <net/tso.h>
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
+{
+ /* ENETC with pseudo MAC does not have Ethernet MAC
+ * port registers.
+ */
+ if (enetc_is_pseudo_mac(si))
+ return 0;
+
+ return enetc_port_rd(&si->hw, reg);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
+
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
+{
+ if (enetc_is_pseudo_mac(si))
+ return;
+
+ enetc_port_wr(&si->hw, reg, val);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+
+static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
+ u8 preemptible_tcs)
+{
+ if (!(priv->si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ priv->preemptible_tcs = preemptible_tcs;
+ enetc_mm_commit_preemptible_tcs(priv);
+}
+
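+/* Fold the 48-bit MAC address into a 6-bit hash table index: bit i of
+ * the result is the parity of address bits i, i+6, i+12, ..., i+42
+ * (the mask selects every 6th bit, hweight64() & 0x1 computes the
+ * parity).
+ */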
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
- int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- if (priv->rx_ring[i]->xdp.prog)
- return num_tx_rings - num_possible_cpus();
+ if (priv->xdp_prog)
+ return num_tx_rings - num_possible_cpus();
return num_tx_rings;
}
@@ -123,22 +191,181 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
+static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = ENETC_PM0_SINGLE_STEP_EN;
+
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ /* The "Correction" field of a packet is updated based on the
+ * current time and the timestamp provided
+ */
+ enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
+}
+
+static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = PM_SINGLE_STEP_EN;
+
+ val |= PM_SINGLE_STEP_OFFSET_SET(offset);
+ if (udp)
+ val |= PM_SINGLE_STEP_CH;
+
+ enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
+}
+
+static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
+ struct sk_buff *skb, bool csum_offload)
+{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+ u16 tstamp_off = enetc_cb->origin_tstamp_off;
+ u16 corr_off = enetc_cb->correction_off;
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ __be32 new_sec_l, new_nsec;
+ __be16 new_sec_h;
+ u32 lo, hi, nsec;
+ u8 *data;
+ u64 sec;
+
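+ /* ENETC_SICTR0/1 hold the low and high 32 bits of the current 1588
+ * time, expressed in nanoseconds; do_div() splits it into the
+ * seconds and nanoseconds parts of the originTimestamp.
+ */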
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Update originTimestamp field of Sync packet
+ * - 48-bit seconds field
+ * - 32-bit nanoseconds field
+ *
+ * In addition, if csum_offload is false, the UDP checksum needs
+ * to be updated by software after the originTimestamp field is
+ * changed; otherwise the hardware would write a wrong checksum to
+ * the packet when it later updates the correction field.
+ */
+
+ data = skb_mac_header(skb);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (enetc_cb->udp && !csum_offload) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + tstamp_off);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + tstamp_off + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + tstamp_off + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + tstamp_off) = new_sec_h;
+ *(__be32 *)(data + tstamp_off + 2) = new_sec_l;
+ *(__be32 *)(data + tstamp_off + 6) = new_nsec;
+
+ /* Configure single-step register */
+ if (is_enetc_rev1(si))
+ enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
+ else
+ enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);
+
+ return lo & ENETC_TXBD_TSTAMP;
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_tx_swbd *tx_swbd;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
- u8 msgtype, twostep, udp;
+ bool csum_offload = false;
union enetc_tx_bd *txbd;
- u16 offset1, offset2;
int i, count = 0;
skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
+ u32 tstamp;
+
+ enetc_clear_tx_bd(&temp_bd);
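+ /* Describe the L3/L4 header layout in the Tx BD so the controller
+ * can insert the TCP/UDP checksum in hardware; fall back to
+ * skb_checksum_help() when the offload cannot be used.
+ */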
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Cannot support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ csum_offload = true;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ do_onestep_tstamp = true;
+ tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
+ } else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
@@ -150,7 +377,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -160,19 +386,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
- if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
- &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep)
- WARN_ONCE(1, "Bad packet for one-step timestamping\n");
- else
- do_onestep_tstamp = true;
- } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
- do_twostep_tstamp = true;
- }
-
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
- tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+ tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
@@ -212,38 +428,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
- u8 *data;
-
- lo = enetc_rd_hot(hw, ENETC_SICTR0);
- hi = enetc_rd_hot(hw, ENETC_SICTR1);
- sec = (u64)hi << 32 | lo;
- nsec = do_div(sec, 1000000000);
-
/* Configure extension BD */
- temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ temp_bd.ext.tstamp = cpu_to_le32(tstamp);
e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
-
- /* Update originTimestamp field of Sync packet
- * - 48 bits seconds field
- * - 32 bits nanseconds field
- */
- data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
-
- /* Configure single-step register */
- val = ENETC_PM0_SINGLE_STEP_EN;
- val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
- if (udp)
- val |= ENETC_PM0_SINGLE_STEP_CH;
-
- enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
- enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -305,25 +492,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -366,7 +548,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -465,8 +650,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 BDs: 1 BD for the LSO header + 1 extension BD + 1 BD for the
+ * linear area data, excluding the LSO header, namely
+ * skb_headlen(skb) - lso_hdr_len (which may be 0, but that's
+ * okay, we only need to consider the worst case), and 1 BD
+ * for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of
+ * large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_size in 32-bit units (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Get the next BD, and the next BD is extended BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD and an extend BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -498,9 +908,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -524,15 +934,20 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -554,13 +969,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -568,12 +977,13 @@ err_chained_bd:
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
&priv->flags)) {
skb_queue_tail(&priv->tx_skbs, skb);
@@ -584,16 +994,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -603,11 +1025,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -616,7 +1033,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -628,28 +1045,34 @@ drop_packet_err:
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 udp, msgtype, twostep;
u16 offset1, offset2;
- /* Mark tx timestamp type on skb->cb[0] if requires */
+ /* Mark the Tx timestamp type on enetc_cb->flag if required */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
- skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
- } else {
- skb->cb[0] = 0;
- }
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
+ enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ else
+ enetc_cb->flag = 0;
/* Fall back to two-step timestamp if not one-step Sync packet */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
&offset1, &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
- skb->cb[0] = ENETC_F_TX_TSTAMP;
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
+ enetc_cb->flag = ENETC_F_TX_TSTAMP;
+ } else {
+ enetc_cb->udp = !!udp;
+ enetc_cb->correction_off = offset1;
+ enetc_cb->origin_tstamp_off = offset2;
+ }
}
return enetc_start_xmit(skb, ndev);
}
+EXPORT_SYMBOL_GPL(enetc_xmit);
static irqreturn_t enetc_msix(int irq, void *data)
{
@@ -679,8 +1102,9 @@ static void enetc_rx_dim_work(struct work_struct *w)
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
struct enetc_int_vector *v =
container_of(dim, struct enetc_int_vector, rx_dim);
+ struct enetc_ndev_priv *priv = netdev_priv(v->rx_ring.ndev);
- v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+ v->rx_ictt = enetc_usecs_to_cycles(moder.usec, priv->sysclk_freq);
dim->state = DIM_START_MEASURE;
}
@@ -697,7 +1121,7 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
v->rx_ring.stats.packets,
v->rx_ring.stats.bytes,
&dim_sample);
- net_dim(&v->rx_dim, dim_sample);
+ net_dim(&v->rx_dim, &dim_sample);
}
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
@@ -792,9 +1216,9 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
+ int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
struct net_device *ndev = tx_ring->ndev;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
bool do_twostep_tstamp;
@@ -821,6 +1245,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
&tstamp);
do_twostep_tstamp = true;
}
+
+ if (tx_swbd->qbv_en &&
+ txbd->wb.status & ENETC_TXBD_STATS_WIN)
+ tx_win_drop++;
}
if (tx_swbd->is_xdp_tx)
@@ -831,7 +1259,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+
+ if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -873,10 +1303,13 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
tx_ring->stats.packets += tx_frm_cnt;
tx_ring->stats.bytes += tx_byte_cnt;
+ tx_ring->stats.win_drop += tx_win_drop;
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ !test_bit(ENETC_TX_DOWN, &priv->flags) &&
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -951,7 +1384,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -975,7 +1407,6 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
}
}
-#endif
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
@@ -991,6 +1422,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1001,24 +1433,19 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
-#endif
}
/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
@@ -1177,6 +1604,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1203,10 +1632,18 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
if (!skb)
break;
- rx_byte_cnt += skb->len;
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor, so rx_byte_cnt must also
+ * account for the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1214,6 +1651,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1299,6 +1738,10 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
xdp_tx_swbd->xdp_frame = NULL;
n++;
+
+ if (!xdp_frame_has_frags(xdp_frame))
+ goto out;
+
xdp_tx_swbd = &xdp_tx_arr[n];
shinfo = xdp_get_shared_info_from_frame(xdp_frame);
@@ -1328,7 +1771,7 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
n++;
xdp_tx_swbd = &xdp_tx_arr[n];
}
-
+out:
xdp_tx_arr[n - 1].is_eof = true;
xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
@@ -1344,6 +1787,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
+ return -ENETDOWN;
+
enetc_lock_mdio();
tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@@ -1378,22 +1824,19 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
return xdp_tx_frm_cnt;
}
+EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
struct xdp_buff *xdp_buff, u16 size)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
- struct skb_shared_info *shinfo;
/* To be used for XDP_TX */
rx_swbd->len = size;
xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
rx_ring->buffer_offset, size, false);
-
- shinfo = xdp_get_shared_info_from_buff(xdp_buff);
- shinfo->nr_frags = 0;
}
static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
@@ -1401,14 +1844,25 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
- skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+ skb_frag_t *frag;
/* To be used for XDP_TX */
rx_swbd->len = size;
- skb_frag_off_set(frag, rx_swbd->page_offset);
- skb_frag_size_set(frag, size);
- __skb_frag_set_page(frag, rx_swbd->page);
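+ /* First fragment of a multi-buffer frame: mark the xdp_buff as
+ * fragmented and start the frags accounting; subsequent fragments
+ * only grow xdp_frags_size.
+ */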
+ if (!xdp_buff_has_frags(xdp_buff)) {
+ xdp_buff_set_frags_flag(xdp_buff);
+ shinfo->xdp_frags_size = size;
+ shinfo->nr_frags = 0;
+ } else {
+ shinfo->xdp_frags_size += size;
+ }
+
+ if (page_is_pfmemalloc(rx_swbd->page))
+ xdp_buff_set_frag_pfmemalloc(xdp_buff);
+
+ frag = &shinfo->frags[shinfo->nr_frags];
+ skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
+ size);
shinfo->nr_frags++;
}
@@ -1480,24 +1934,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
- rx_ring->stats.xdp_drops++;
}
-static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
- int rx_ring_last)
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
{
while (rx_ring_first != rx_ring_last) {
- struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
-
- if (rx_swbd->page) {
- dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
- rx_swbd->dir);
- __free_page(rx_swbd->page);
- rx_swbd->page = NULL;
- }
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
- rx_ring->stats.xdp_redirect_failures++;
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
@@ -1516,12 +1962,13 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
- int tmp_orig_i, err;
+ int orig_i, err;
u32 bd_status;
rxbd = enetc_rxbd(rx_ring, i);
@@ -1537,39 +1984,61 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
&cleaned_cnt, &xdp_buff);
+ /* When set, the outer VLAN header is extracted and reported
+ * in the receive buffer descriptor, so rx_byte_cnt must also
+ * account for the length of the extracted VLAN header.
+ */
+ if (bd_status & ENETC_RXBD_FLAG_VLAN)
+ rx_byte_cnt += VLAN_HLEN;
+ rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
switch (xdp_act) {
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
fallthrough;
case XDP_DROP:
enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ break;
+ }
+
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@@ -1578,7 +2047,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -1595,32 +2064,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
- /* xdp_return_frame does not support S/G in the sense
- * that it leaks the fragments (__xdp_return should not
- * call page_frag_free only for the initial buffer).
- * Until XDP_REDIRECT gains support for S/G let's keep
- * the code structure in place, but dead. We drop the
- * S/G frames ourselves to avoid memory leaks which
- * would otherwise leave the kernel OOM.
- */
- if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
- enetc_xdp_drop(rx_ring, orig_i, i);
- rx_ring->stats.xdp_redirect_sg++;
- break;
- }
-
- tmp_orig_i = orig_i;
-
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
-
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
- enetc_xdp_free(rx_ring, tmp_orig_i, i);
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_failures++;
} else {
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -1635,8 +2086,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
- xdp_do_flush_map();
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
+ xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -1645,6 +2099,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1663,6 +2119,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -1674,10 +2131,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -1686,6 +2141,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
@@ -1710,9 +2166,15 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rx_rings = (val >> 16) & 0xff;
si->num_tx_rings = val & 0xff;
- val = enetc_rd(hw, ENETC_SIRFSCAPR);
- si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
- si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ val = enetc_rd(hw, ENETC_SIPCAPR0);
+ if (val & ENETC_SIPCAPR0_RFS) {
+ val = enetc_rd(hw, ENETC_SIRFSCAPR);
+ si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
+ si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
+ } else {
+ /* ENETC instance that does not support RFS */
+ si->num_fs_entries = 0;
+ }
si->num_rss = 0;
val = enetc_rd(hw, ENETC_SIPCAPR0);
@@ -1723,207 +2185,260 @@ void enetc_get_si_caps(struct enetc_si *si)
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
- if (val & ENETC_SIPCAPR0_QBV)
- si->hw_features |= ENETC_SI_F_QBV;
-
- if (val & ENETC_SIPCAPR0_PSFP)
- si->hw_features |= ENETC_SI_F_PSFP;
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
+EXPORT_SYMBOL_GPL(enetc_get_si_caps);
-static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
- r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
- &r->bd_dma_base, GFP_KERNEL);
- if (!r->bd_base)
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
+ &res->bd_dma_base, GFP_KERNEL);
+ if (!res->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
- if (!IS_ALIGNED(r->bd_dma_base, 128)) {
- dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
- r->bd_dma_base);
+ if (!IS_ALIGNED(res->bd_dma_base, 128)) {
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
return -EINVAL;
}
return 0;
}
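
enetc_dma_alloc_bdr() frees and fails the allocation whenever the coherent DMA address misses the 128-byte alignment the hardware requires. The power-of-two alignment test is the same one the kernel's IS_ALIGNED() macro performs; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's IS_ALIGNED(): x is aligned to a (a power of two)
 * exactly when the low log2(a) bits are all clear.
 */
#define IS_ALIGNED(x, a)    (((x) & ((a) - 1)) == 0)

int main(void)
{
    uint64_t addrs[] = { 0x1000, 0x1080, 0x10c0, 0x10e4 };

    for (unsigned i = 0; i < 4; i++)
        printf("%#llx -> %s\n", (unsigned long long)addrs[i],
               IS_ALIGNED(addrs[i], 128) ? "ok" : "reject (-EINVAL)");
    return 0;
}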
-static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
+{
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
+}
+
+static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count)
{
int err;
- txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
- if (!txr->tx_swbd)
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_tx_bd);
+
+ res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd));
+ if (!res->tx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ err = enetc_dma_alloc_bdr(res);
if (err)
goto err_alloc_bdr;
- txr->tso_headers = dma_alloc_coherent(txr->dev,
- txr->bd_count * TSO_HEADER_SIZE,
- &txr->tso_headers_dma,
+ res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
+ &res->tso_headers_dma,
GFP_KERNEL);
- if (!txr->tso_headers) {
+ if (!res->tso_headers) {
err = -ENOMEM;
goto err_alloc_tso;
}
- txr->next_to_clean = 0;
- txr->next_to_use = 0;
-
return 0;
err_alloc_tso:
- dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
- txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
+ enetc_dma_free_bdr(res);
err_alloc_bdr:
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ vfree(res->tx_swbd);
+ res->tx_swbd = NULL;
return err;
}
-static void enetc_free_txbdr(struct enetc_bdr *txr)
+static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
{
- int size, i;
-
- for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
-
- size = txr->bd_count * sizeof(union enetc_tx_bd);
-
- dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
- txr->tso_headers, txr->tso_headers_dma);
- txr->tso_headers = NULL;
-
- dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
-
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
+ res->tso_headers, res->tso_headers_dma);
+ enetc_dma_free_bdr(res);
+ vfree(res->tx_swbd);
}
-static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
+ struct enetc_bdr_resource *tx_res;
int i, err;
+ tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
+ if (!tx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_tx_rings; i++) {
- err = enetc_alloc_txbdr(priv->tx_ring[i]);
+ struct enetc_bdr *tx_ring = priv->tx_ring[i];
+ err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
+ tx_ring->bd_count);
if (err)
goto fail;
}
- return 0;
+ return tx_res;
fail:
while (i-- > 0)
- enetc_free_txbdr(priv->tx_ring[i]);
+ enetc_free_tx_resource(&tx_res[i]);
- return err;
+ kfree(tx_res);
+
+ return ERR_PTR(err);
}
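
enetc_alloc_tx_resources() uses the common all-or-nothing pattern: allocate per-ring resources in a loop and, on the first failure, unwind only the rings that already succeeded before freeing the array itself. A compact sketch of the idiom, with hypothetical alloc_one()/free_one() helpers:

#include <stdio.h>
#include <stdlib.h>

static int alloc_one(int i)  { return i == 3 ? -1 : 0; } /* fail on ring 3 */
static void free_one(int i)  { printf("free ring %d\n", i); }

static int alloc_all(int n)
{
    int i;

    for (i = 0; i < n; i++)
        if (alloc_one(i) < 0)
            goto fail;
    return 0;
fail:
    while (i-- > 0)     /* unwind only what succeeded: rings 2, 1, 0 */
        free_one(i);
    return -1;
}

int main(void)
{
    return alloc_all(8) ? EXIT_FAILURE : EXIT_SUCCESS;
}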
-static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
+ size_t num_resources)
{
- int i;
+ size_t i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_free_txbdr(priv->tx_ring[i]);
+ for (i = 0; i < num_resources; i++)
+ enetc_free_tx_resource(&tx_res[i]);
+
+ kfree(tx_res);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
+static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count,
+ bool extended)
{
- size_t size = sizeof(union enetc_rx_bd);
int err;
- rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
- if (!rxr->rx_swbd)
- return -ENOMEM;
-
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_rx_bd);
if (extended)
- size *= 2;
+ res->bd_size *= 2;
+
+ res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
+ if (!res->rx_swbd)
+ return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, size);
+ err = enetc_dma_alloc_bdr(res);
if (err) {
- vfree(rxr->rx_swbd);
+ vfree(res->rx_swbd);
return err;
}
- rxr->next_to_clean = 0;
- rxr->next_to_use = 0;
- rxr->next_to_alloc = 0;
- rxr->ext_en = extended;
-
return 0;
}
-static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
{
- int size;
-
- size = rxr->bd_count * sizeof(union enetc_rx_bd);
-
- dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
- rxr->bd_base = NULL;
-
- vfree(rxr->rx_swbd);
- rxr->rx_swbd = NULL;
+ enetc_dma_free_bdr(res);
+ vfree(res->rx_swbd);
}
-static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
{
- bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+ struct enetc_bdr_resource *rx_res;
int i, err;
+ rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+ err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
+ rx_ring->bd_count, extended);
if (err)
goto fail;
}
- return 0;
+ return rx_res;
fail:
while (i-- > 0)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ enetc_free_rx_resource(&rx_res[i]);
- return err;
+ kfree(rx_res);
+
+ return ERR_PTR(err);
+}
+
+static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
+ size_t num_resources)
+{
+ size_t i;
+
+ for (i = 0; i < num_resources; i++)
+ enetc_free_rx_resource(&rx_res[i]);
+
+ kfree(rx_res);
+}
+
+static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ tx_ring->bd_base = res ? res->bd_base : NULL;
+ tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
+ tx_ring->tso_headers = res ? res->tso_headers : NULL;
+ tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
+}
+
+static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ rx_ring->bd_base = res ? res->bd_base : NULL;
+ rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
}
-static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ if (priv->tx_res)
+ enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_assign_tx_resource(priv->tx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->tx_res = res;
}
-static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- if (!tx_ring->tx_swbd)
- return;
+ if (priv->rx_res)
+ enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_assign_rx_resource(priv->rx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->rx_res = res;
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);
}
-
- tx_ring->next_to_clean = 0;
- tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
int i;
- if (!rx_ring->rx_swbd)
- return;
-
for (i = 0; i < rx_ring->bd_count; i++) {
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
@@ -1935,10 +2450,6 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
@@ -1965,13 +2476,35 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
return 0;
}
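
The default RSS table simply round-robins the indirection entries across the RX ring groups, so each ring receives an equal share of hash buckets. A tiny sketch of the fill loop:

#include <stdio.h>

int main(void)
{
    unsigned num_rss = 16, num_groups = 3, i;

    /* rss_table[i] = i % num_groups, as in enetc_setup_default_rss_table() */
    for (i = 0; i < num_rss; i++)
        printf("bucket %2u -> ring %u\n", i, i % num_groups);
    return 0;
}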
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -1985,14 +2518,21 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_configure_si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
@@ -2008,10 +2548,11 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
*/
priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
priv->num_tx_rings = si->num_tx_rings;
- priv->bdr_int_num = cpus;
+ priv->bdr_int_num = priv->num_rx_rings;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
- priv->tx_ictt = ENETC_TXIC_TIMETHR;
+ priv->tx_ictt = enetc_usecs_to_cycles(600, priv->sysclk_freq);
}
+EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
@@ -2024,11 +2565,13 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
kfree(priv->cls_rules);
}
+EXPORT_SYMBOL_GPL(enetc_free_si_resources);
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
@@ -2052,7 +2595,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN;
+ tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -2064,10 +2607,11 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
-static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
+ bool extended)
{
int idx = rx_ring->index;
- u32 rbmr;
+ u32 rbmr = 0;
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
lower_32_bits(rx_ring->bd_dma_base));
@@ -2084,13 +2628,17 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
else
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
- rbmr = ENETC_RBMR_EN;
-
+ rx_ring->ext_en = extended;
if (rx_ring->ext_en)
rbmr |= ENETC_RBMR_BDS;
@@ -2100,26 +2648,68 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+
enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
enetc_unlock_mdio();
- /* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
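
Writing RBPIR = 0 and RBCIR = 1 before the refill means that, if page allocation never succeeds, the producer can never catch up to the consumer and the hardware drops frames instead of overrunning stale buffers. The free-descriptor arithmetic below is a sketch modeled on the driver's enetc_bd_unused() helper (not shown in this hunk), where one slot is always kept as a gap:

#include <stdio.h>

/* Free descriptors in a ring with one reserved gap slot: the ring is
 * "full" when the producer sits one slot behind the consumer.
 */
static int bd_unused(int bd_count, int next_to_clean, int next_to_use)
{
    if (next_to_clean > next_to_use)
        return next_to_clean - next_to_use - 1;
    return bd_count + next_to_clean - next_to_use - 1;
}

int main(void)
{
    printf("%d\n", bd_unused(512, 0, 0));   /* empty ring: 511 usable */
    printf("%d\n", bd_unused(512, 10, 9));  /* full ring: 0 usable */
    return 0;
}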
-static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
+}
+
+static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+ tbmr |= ENETC_TBMR_EN;
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+}
+
+static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+ rbmr |= ENETC_RBMR_EN;
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_enable_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
}
-static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2127,13 +2717,36 @@ static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
-static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
- int delay = 8, timeout = 100;
- int idx = tx_ring->index;
+ int idx = rx_ring->index;
/* disable EN bit on ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+}
+
+static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+}
+
+static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
/* wait for busy to clear */
while (delay < timeout &&
@@ -2147,38 +2760,33 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
idx);
}
-static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
-
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
-
- udelay(1);
+ enetc_wait_txbdr(hw, priv->tx_ring[i]);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
- err = request_irq(irq, enetc_msix, 0, v->name, v);
+ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
- disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
@@ -2257,23 +2865,27 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
int err;
- if (!priv->phylink)
- return 0; /* phy-less mode */
+ if (!priv->phylink) {
+ /* phy-less mode */
+ netif_carrier_on(ndev);
+ return 0;
+ }
err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
if (err) {
@@ -2282,9 +2894,11 @@ static int enetc_phylink_connect(struct net_device *ndev)
}
/* disable EEE autoneg, until ENETC driver supports it */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phylink_ethtool_set_eee(priv->phylink, &edata);
+ phylink_start(priv->phylink);
+
return 0;
}
@@ -2295,14 +2909,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
- netif_tx_lock(priv->ndev);
+ netif_tx_lock_bh(priv->ndev);
clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
skb = skb_dequeue(&priv->tx_skbs);
if (skb)
enetc_start_xmit(skb, priv->ndev);
- netif_tx_unlock(priv->ndev);
+ netif_tx_unlock_bh(priv->ndev);
}
static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
@@ -2326,72 +2940,86 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (priv->phylink)
- phylink_start(priv->phylink);
- else
- netif_carrier_on(ndev);
+ enetc_enable_tx_bdrs(priv);
+
+ enetc_enable_rx_bdrs(priv);
netif_tx_start_all_queues(ndev);
+
+ clear_bit(ENETC_TX_DOWN, &priv->flags);
}
+EXPORT_SYMBOL_GPL(enetc_start);
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int num_stack_tx_queues;
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ bool extended;
int err;
- err = enetc_setup_irqs(priv);
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
+ err = clk_prepare_enable(priv->ref_clk);
if (err)
return err;
+ err = enetc_setup_irqs(priv);
+ if (err)
+ goto err_setup_irqs;
+
err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
- err = enetc_alloc_tx_resources(priv);
- if (err)
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
goto err_alloc_tx;
+ }
- err = enetc_alloc_rx_resources(priv);
- if (err)
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
goto err_alloc_rx;
-
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
+ }
enetc_tx_onestep_tstamp_init(priv);
- enetc_setup_bdrs(priv);
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
enetc_start(ndev);
return 0;
-err_set_queues:
- enetc_free_rx_resources(priv);
err_alloc_rx:
- enetc_free_tx_resources(priv);
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
if (priv->phylink)
phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
+err_setup_irqs:
+ clk_disable_unprepare(priv->ref_clk);
return err;
}
+EXPORT_SYMBOL_GPL(enetc_open);
void enetc_stop(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;
+ set_bit(ENETC_TX_DOWN, &priv->flags);
+
netif_tx_stop_all_queues(ndev);
+ enetc_disable_rx_bdrs(priv);
+
+ enetc_wait_bdrs(priv);
+
+ enetc_disable_tx_bdrs(priv);
+
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
ENETC_BDR_INT_BASE_IDX + i);
@@ -2401,120 +3029,210 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (priv->phylink)
- phylink_stop(priv->phylink);
- else
- netif_carrier_off(ndev);
-
enetc_clear_interrupts(priv);
}
+EXPORT_SYMBOL_GPL(enetc_stop);
int enetc_close(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
enetc_stop(ndev);
- enetc_clear_bdrs(priv);
- if (priv->phylink)
+ if (priv->phylink) {
+ phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
enetc_free_rxtx_rings(priv);
- enetc_free_rx_resources(priv);
- enetc_free_tx_resources(priv);
+
+ /* Avoids dangling pointers and also frees old resources */
+ enetc_assign_rx_resources(priv, NULL);
+ enetc_assign_tx_resources(priv, NULL);
+
enetc_free_irqs(priv);
+ clk_disable_unprepare(priv->ref_clk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_close);
+
+static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
+ int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
+ void *ctx)
+{
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* If the interface is down, run the callback right away,
+ * without reconfiguration.
+ */
+ if (!netif_running(priv->ndev)) {
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
+
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
+ goto out;
+ }
+
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
+ goto out_free_tx_res;
+ }
+
+ enetc_stop(priv->ndev);
+ enetc_free_rxtx_rings(priv);
+
+ /* Interface is down, run optional callback now */
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ goto out_restart;
+ }
+
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
return 0;
+
+out_restart:
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+ enetc_free_rx_resources(rx_res, priv->num_rx_rings);
+out_free_tx_res:
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
+out:
+ return err;
}
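
enetc_reconfigure() is a prepare/commit/rollback sequence: allocate the new ring resources while the interface is still running, then stop it, run the optional callback, and either swap the new resources in or restart with the old ones and free the new allocation. A sketch of that control flow under hypothetical alloc_res()/free_res() helpers:

#include <stdio.h>

struct res { int id; };

static int alloc_res(struct res *r, int id) { r->id = id; return 0; }
static void free_res(struct res *r) { printf("free res %d\n", r->id); }
static int callback_fails(void) { return -1; }

static int reconfigure(struct res *cur)
{
    struct res next;
    int err;

    if (alloc_res(&next, cur->id + 1))      /* 1. prepare new resources */
        return -1;

    printf("stop with res %d\n", cur->id);  /* 2. quiesce the hardware */

    err = callback_fails();                 /* 3. optional config step */
    if (err) {                              /*    rollback on failure */
        printf("restart with res %d\n", cur->id);
        free_res(&next);
        return err;
    }

    free_res(cur);                          /* 4. commit: swap in new */
    *cur = next;
    printf("start with res %d\n", cur->id);
    return 0;
}

int main(void)
{
    struct res r = { 0 };

    return reconfigure(&r) ? 1 : 0;
}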
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
+ priv->tx_ring[i]->prio);
+}
+
+void enetc_reset_tc_mqprio(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
- u8 num_tc;
int i;
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = mqprio->num_tc;
- if (!num_tc) {
- netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ priv->min_num_stack_tx_queues = num_possible_cpus();
- /* Reset all ring priorities to 0 */
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
- }
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ enetc_debug_tx_ring_prios(priv);
+
+ enetc_change_preemptible_tcs(priv, 0);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_tc_mqprio);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ struct enetc_hw *hw = &priv->si->hw;
+ int num_stack_tx_queues = 0;
+ struct enetc_bdr *tx_ring;
+ u8 num_tc = qopt->num_tc;
+ int offset, count;
+ int err, tc, q;
+
+ if (!num_tc) {
+ enetc_reset_tc_mqprio(ndev);
return 0;
}
- /* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > num_stack_tx_queues) {
- netdev_err(ndev, "Max %d traffic classes supported\n",
- priv->num_tx_rings);
- return -EINVAL;
- }
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
- /* For the moment, we use only one BD ring per TC.
- *
- * Configure num_tc BD rings with increasing priorities.
- */
- for (i = 0; i < num_tc; i++) {
- tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ for (tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+
+ for (q = offset; q < offset + count; q++) {
+ tx_ring = priv->tx_ring[q];
+ /* The prio_tc_map is skb_tx_hash()'s way of selecting
+ * between TX queues based on skb->priority. As such,
+ * there's nothing to offload based on it.
+ * Make the mqprio "traffic class" be the priority of
+ * this ring group, and leave the Tx IPV to traffic
+ * class mapping as its default mapping value of 1:1.
+ */
+ tx_ring->prio = tc;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
}
- /* Reset the number of netdev queues based on the TC count */
- netif_set_real_num_tx_queues(ndev, num_tc);
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ priv->min_num_stack_tx_queues = num_stack_tx_queues;
- netdev_set_num_tc(ndev, num_tc);
+ enetc_debug_tx_ring_prios(priv);
- /* Each TC is associated with one netdev queue */
- for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(ndev, i, 1, i);
+ enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);
return 0;
-}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
+err_reset_tc:
+ enetc_reset_tc_mqprio(ndev);
+ return err;
}
+EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
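
The reworked enetc_setup_tc_mqprio() walks each traffic class's {offset, count} pair from the qopt, registers the group with netdev_set_tc_queue(), and assigns every ring in the group a hardware priority equal to its TC index. A sketch of that queue walk, with an assumed 3-TC/8-queue layout:

#include <stdio.h>

int main(void)
{
    /* Example mqprio layout: 3 TCs over 8 queues (assumed values) */
    int offset[] = { 0, 2, 6 };
    int count[]  = { 2, 4, 2 };
    int num_tc = 3, tc, q, total = 0;

    for (tc = 0; tc < num_tc; tc++) {
        total += count[tc];
        for (q = offset[tc]; q < offset[tc] + count[tc]; q++)
            printf("txq %d -> prio %d\n", q, tc); /* ring prio = TC index */
    }
    printf("real_num_tx_queues = %d\n", total);
    return 0;
}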
-static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
- struct enetc_ndev_priv *priv = netdev_priv(dev);
- struct bpf_prog *old_prog;
- bool is_up;
- int i;
-
- /* The buffer layout is changing, so we need to drain the old
- * RX buffers and seed new ones.
- */
- is_up = netif_running(dev);
- if (is_up)
- dev_close(dev);
+ struct bpf_prog *old_prog, *prog = ctx;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
@@ -2529,29 +3247,53 @@ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
rx_ring->buffer_offset = ENETC_RXB_PAD;
}
- if (is_up)
- return dev_open(dev, extack);
-
return 0;
}
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- switch (xdp->command) {
+ int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ bool extended;
+
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+ return -EBUSY;
+ }
+
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
+}
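
enetc_setup_xdp_prog() reserves one TX ring per possible CPU for XDP_TX/XDP_REDIRECT traffic and rejects the attach when that reservation would leave the stack with fewer queues than min_num_stack_tx_queues. The check reduces to simple arithmetic; a sketch with assumed counts:

#include <stdio.h>

int main(void)
{
    int num_tx_rings = 8;       /* assumed hardware total */
    int min_stack_txqs = 6;     /* e.g. num_possible_cpus() */
    int xdp_txqs = 4;           /* one per possible CPU when a prog is set */

    if (min_stack_txqs + xdp_txqs > num_tx_rings)
        printf("-EBUSY: %d XDP + %d stack > %d rings\n",
               xdp_txqs, min_stack_txqs, num_tx_rings);
    else
        printf("attach ok\n");
    return 0;
}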
+
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
case XDP_SETUP_PROG:
- return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
default:
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_bpf);
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned long packets = 0, bytes = 0;
+ unsigned long tx_dropped = 0;
int i;
for (i = 0; i < priv->num_rx_rings; i++) {
@@ -2567,76 +3309,40 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
for (i = 0; i < priv->num_tx_rings; i++) {
packets += priv->tx_ring[i]->stats.packets;
bytes += priv->tx_ring[i]->stats.bytes;
+ tx_dropped += priv->tx_ring[i]->stats.win_drop;
}
stats->tx_packets = packets;
stats->tx_bytes = bytes;
+ stats->tx_dropped = tx_dropped;
return stats;
}
-
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(enetc_get_stats);
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2648,102 +3354,189 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_features);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- int ao;
+ int err, new_offloads = priv->active_offloads;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ if (!enetc_si_is_pf(priv->si) ||
+ enetc_is_pseudo_mac(priv->si))
+ return -EOPNOTSUPP;
+
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
default:
return -ERANGE;
}
- ao = priv->active_offloads;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
- priv->active_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ new_offloads |= ENETC_F_RX_TSTAMP;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- if (netif_running(ndev) && ao != priv->active_offloads) {
- enetc_close(ndev);
- enetc_open(ndev);
+ if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
+ bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
+
+ err = enetc_reconfigure(priv, extended, NULL, NULL);
+ if (err)
+ return err;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ priv->active_offloads = new_offloads;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!enetc_ptp_clock_is_enabled(priv->si))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-#endif
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
-#endif
if (!priv->phylink)
return -EOPNOTSUPP;
return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
+EXPORT_SYMBOL_GPL(enetc_ioctl);
+
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ int v_tx_rings)
+{
+ struct enetc_int_vector *v;
+ struct enetc_bdr *bdr;
+ int j, err;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (err)
+ goto free_vector;
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ goto free_vector;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ return 0;
+
+free_vector:
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+
+ return err;
+}
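
enetc_int_vector_init() allocates the vector header and its trailing tx_ring[] flexible array in a single kzalloc(struct_size(...)) call. A userspace sketch of the same sizing; note that the real struct_size() additionally guards against multiplication overflow, which this simplified macro does not:

#include <stdio.h>
#include <stdlib.h>

struct ring { int index; };
struct vector {
    int count_tx_rings;
    struct ring tx_ring[];      /* flexible array member */
};

/* Simplified struct_size(): header plus n trailing array elements */
#define struct_size(p, member, n) \
    (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

int main(void)
{
    int n = 4;
    struct vector *v = calloc(1, struct_size(v, tx_ring, n));

    if (!v)
        return 1;
    v->count_tx_rings = n;
    for (int i = 0; i < n; i++)
        v->tx_ring[i].index = i;
    printf("allocated %zu bytes for %d rings\n",
           struct_size(v, tx_ring, n), v->count_tx_rings);
    free(v);
    return 0;
}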
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ int j, tx_ring_index;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ tx_ring_index = priv->bdr_int_num * j + i;
+ priv->tx_ring[tx_ring_index] = NULL;
+ }
+
+ priv->rx_ring[i] = NULL;
+ priv->int_vector[i] = NULL;
+ kfree(v);
+}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int v_tx_rings, v_remainder;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
- int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -2757,117 +3550,56 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
/* # of tx rings per int vector */
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
+ v_remainder = priv->num_tx_rings % priv->bdr_int_num;
for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v;
- struct enetc_bdr *bdr;
- int j;
+ /* Distribute the remaining TX rings to the first v_remainder
+ * interrupt vectors
+ */
+ int num_tx_rings = i < v_remainder ? v_tx_rings + 1 : v_tx_rings;
- v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
- if (!v) {
- err = -ENOMEM;
+ err = enetc_int_vector_init(priv, i, num_tx_rings);
+ if (err)
goto fail;
- }
-
- priv->int_vector[i] = v;
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- bdr->buffer_offset = ENETC_RXB_PAD;
- priv->rx_ring[i] = bdr;
+ }
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
- if (err) {
- kfree(v);
- goto fail;
- }
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&bdr->xdp.rxq);
- kfree(v);
- goto fail;
- }
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
- /* init defaults for adaptive IC */
- if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
- v->rx_ictt = 0x1;
- v->rx_dim_en = true;
- }
- INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
- v->count_tx_rings = v_tx_rings;
-
- for (j = 0; j < v_tx_rings; j++) {
- int idx;
-
- /* default tx ring mapping policy */
- idx = priv->bdr_int_num * j + i;
- __set_bit(idx, &v->tx_rings_map);
- bdr = &v->tx_ring[j];
- bdr->index = idx;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->tx_bd_count;
- priv->tx_ring[idx] = bdr;
- }
- }
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+ priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
return 0;
fail:
- while (i--) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- kfree(v);
- }
+ while (i--)
+ enetc_int_vector_destroy(priv, i);
pci_free_irq_vectors(pdev);
return err;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_msix);
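
Since num_tx_rings need not divide evenly by bdr_int_num, enetc_alloc_msix() now hands the first (num_tx_rings % bdr_int_num) vectors one extra ring each. A sketch of the distribution with assumed counts:

#include <stdio.h>

int main(void)
{
    int num_tx_rings = 10, bdr_int_num = 4;  /* assumed counts */
    int base = num_tx_rings / bdr_int_num;
    int rem = num_tx_rings % bdr_int_num;

    /* The first 'rem' vectors take base + 1 rings, the rest take base */
    for (int i = 0; i < bdr_int_num; i++)
        printf("vector %d: %d tx rings\n", i,
               i < rem ? base + 1 : base);
    return 0;
}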
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
- for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- }
-
- for (i = 0; i < priv->num_rx_rings; i++)
- priv->rx_ring[i] = NULL;
-
- for (i = 0; i < priv->num_tx_rings; i++)
- priv->tx_ring[i] = NULL;
-
- for (i = 0; i < priv->bdr_int_num; i++) {
- kfree(priv->int_vector[i]);
- priv->int_vector[i] = NULL;
- }
+ for (i = 0; i < priv->bdr_int_num; i++)
+ enetc_int_vector_destroy(priv, i);
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
}
+EXPORT_SYMBOL_GPL(enetc_free_msix);
static void enetc_kfree_si(struct enetc_si *si)
{
@@ -2897,12 +3629,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, name);
@@ -2961,6 +3689,7 @@ err_dma:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_pci_probe);
void enetc_pci_remove(struct pci_dev *pdev)
{
@@ -2972,3 +3701,77 @@ void enetc_pci_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
+EXPORT_SYMBOL_GPL(enetc_pci_remove);
+
+static const struct enetc_drvdata enetc_pf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_pf_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .pmac_offset = ENETC4_PMAC_OFFSET,
+ .eth_ops = &enetc4_pf_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc4_ppm_data = {
+ .sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
+ .eth_ops = &enetc4_ppm_ethtool_ops,
+};
+
+static const struct enetc_drvdata enetc_vf_data = {
+ .sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
+ .eth_ops = &enetc_vf_ethtool_ops,
+};
+
+static const struct enetc_platform_info enetc_info[] = {
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_PF,
+ .data = &enetc_pf_data,
+ },
+ { .revision = ENETC_REV_4_1,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+ { .revision = ENETC_REV_1_0,
+ .dev_id = ENETC_DEV_ID_VF,
+ .data = &enetc_vf_data,
+ },
+ {
+ .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PPM_DEV_ID,
+ .data = &enetc4_ppm_data,
+ },
+ { .revision = ENETC_REV_4_3,
+ .dev_id = NXP_ENETC_PF_DEV_ID,
+ .data = &enetc4_pf_data,
+ },
+};
+
+int enetc_get_driver_data(struct enetc_si *si)
+{
+ u16 dev_id = si->pdev->device;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enetc_info); i++) {
+ if (si->revision == enetc_info[i].revision &&
+ dev_id == enetc_info[i].dev_id) {
+ si->drvdata = enetc_info[i].data;
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+EXPORT_SYMBOL_GPL(enetc_get_driver_data);
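
enetc_get_driver_data() matches the (IP revision, PCI device ID) pair against the static enetc_info[] table to select per-platform drvdata, returning -ERANGE when nothing matches. A sketch of the lookup, with made-up revision and device IDs for the demo:

#include <stdio.h>

struct info { unsigned short rev, dev_id; const char *name; };

static const struct info table[] = {
    { 0x0100, 0xe100, "enetc v1 PF" },      /* assumed IDs for the demo */
    { 0x0401, 0xe101, "enetc v4.1 PF" },
};

static const char *lookup(unsigned short rev, unsigned short dev_id)
{
    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].rev == rev && table[i].dev_id == dev_id)
            return table[i].name;
    return NULL;        /* caller maps this to -ERANGE */
}

int main(void)
{
    const char *n = lookup(0x0401, 0xe101);

    printf("%s\n", n ? n : "no match (-ERANGE)");
    return 0;
}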
+
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb39e406b7fc..dce27bd67a7d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,16 +8,33 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
+#include <net/xdp.h>
#include "enetc_hw.h"
+#include "enetc4_hw.h"
#define ENETC_MAC_MAXFRM_SIZE 9600
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -34,10 +51,32 @@ struct enetc_tx_swbd {
u8 is_eof:1;
u8 is_xdp_tx:1;
u8 is_xdp_redirect:1;
+ u8 qbv_en:1;
+};
+
+struct enetc_skb_cb {
+ u8 flag;
+ bool udp;
+ u16 correction_off;
+ u16 origin_tstamp_off;
};
+#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
+
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, not including the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
@@ -54,22 +93,29 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, the max # of chained Tx BDs is 63,
+ * including the head and extension BD, but MAX_SKB_FRAGS only ranges
+ * from 17 to 45, so set ENETC4_MAX_SKB_FRAGS to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
struct enetc_ring_stats {
- unsigned int packets;
- unsigned int bytes;
- unsigned int rx_alloc_errs;
- unsigned int xdp_drops;
- unsigned int xdp_tx;
- unsigned int xdp_tx_drops;
- unsigned int xdp_redirect;
- unsigned int xdp_redirect_failures;
- unsigned int xdp_redirect_sg;
- unsigned int recycles;
- unsigned int recycle_failures;
+ unsigned long packets;
+ unsigned long bytes;
+ unsigned long rx_alloc_errs;
+ unsigned long xdp_drops;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_drops;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_failures;
+ unsigned long recycles;
+ unsigned long recycle_failures;
+ unsigned long win_drop;
};
struct enetc_xdp_data {
@@ -82,6 +128,23 @@ struct enetc_xdp_data {
#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
+struct enetc_bdr_resource {
+ /* Input arguments saved for teardown */
+ struct device *dev; /* for DMA mapping */
+ size_t bd_count;
+ size_t bd_size;
+
+ /* Resource proper */
+ void *bd_base; /* points to Rx or Tx BD ring */
+ dma_addr_t bd_dma_base;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
struct enetc_bdr {
struct device *dev; /* for DMA mapping */
struct net_device *ndev;
@@ -91,6 +154,7 @@ struct enetc_bdr {
void __iomem *rcir;
};
u16 index;
+ u16 prio;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
@@ -162,10 +226,9 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
hw_idx = 2 * i;
-#endif
+
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
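
With the PTP config guard removed, enetc_rxbd() always compiles the extended-descriptor path: when ext_en is set, each logical RX BD occupies two hardware slots (base plus extension), so the software index is doubled. A sketch of the index mapping:

#include <stdbool.h>
#include <stdio.h>

static int hw_index(int i, bool ext_en)
{
    return ext_en ? 2 * i : i;  /* an extension BD follows each base BD */
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("sw bd %d -> hw slot %d (ext) / %d (plain)\n",
               i, hw_index(i, true), hw_index(i, false));
    return 0;
}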
@@ -177,10 +240,8 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
new_rxbd++;
-#endif
if (unlikely(++new_index == rx_ring->bd_count)) {
new_rxbd = rx_ring->bd_base;
@@ -208,8 +269,38 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(1),
};
-#define ENETC_SI_F_QBV BIT(0)
-#define ENETC_SI_F_PSFP BIT(1)
+#define ENETC_SI_F_PSFP BIT(0)
+#define ENETC_SI_F_QBV BIT(1)
+#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
+#define ENETC_SI_F_PPM BIT(4) /* pseudo MAC */
+
+struct enetc_drvdata {
+ u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
+ u64 sysclk_freq;
+ const struct ethtool_ops *eth_ops;
+};
+
+struct enetc_platform_info {
+ u16 revision;
+ u16 dev_id;
+ const struct enetc_drvdata *data;
+};
+
+struct enetc_si;
+
+/*
+ * This structure defines some common hooks for the ENETC PSI and VSI.
+ * In addition, since the VSI only uses struct enetc_si as its private
+ * driver data, this structure also defines some hooks specific to the
+ * VSI. VSI-specific hooks follow the vf_*() naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
/* PCI IEP device data */
struct enetc_si {
@@ -219,18 +310,33 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
int num_fs_entries;
int num_rss; /* number of RSS buckets */
unsigned short pad;
+ u16 revision;
int hw_features;
+ const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
+static inline bool is_enetc_rev1(struct enetc_si *si)
+{
+ return si->pdev->revision == ENETC_REV1;
+}
+
static inline void *enetc_si_priv(const struct enetc_si *si)
{
return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
@@ -257,6 +363,11 @@ static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
}
}
+static inline bool enetc_is_pseudo_mac(struct enetc_si *si)
+{
+ return si->hw_features & ENETC_SI_F_PPM;
+}
+
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
@@ -274,7 +385,7 @@ struct enetc_int_vector {
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring;
- struct enetc_bdr tx_ring[];
+ struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
} ____cacheline_aligned_in_smp;
struct enetc_cls_rule {
@@ -282,7 +393,7 @@ struct enetc_cls_rule {
int used;
};
-#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */
+#define ENETC_MAX_BDR_INT 6 /* fixed to max # of available cpus */
struct psfp_cap {
u32 max_streamid;
u32 max_psfp_filter;
@@ -292,7 +403,6 @@ struct psfp_cap {
};
#define ENETC_F_TX_TSTAMP_MASK 0xff
-/* TODO: more hardware offloads */
enum enetc_active_offloads {
/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
ENETC_F_TX_TSTAMP = BIT(0),
@@ -301,10 +411,14 @@ enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(8),
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
+ ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
+ ENETC_TX_DOWN,
};
/* interrupt coalescing modes */
@@ -320,7 +434,6 @@ enum enetc_ic_mode {
#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
-#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600)
struct enetc_ndev_priv {
struct net_device *ndev;
@@ -333,6 +446,10 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
+
+ u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
+
enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
@@ -340,11 +457,16 @@ struct enetc_ndev_priv {
struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
+ const struct enetc_bdr_resource *tx_res;
+ const struct enetc_bdr_resource *rx_res;
struct enetc_cls_rule *cls_rules;
struct psfp_cap psfp_cap;
+ /* Minimum number of TX queues required by the network stack */
+ unsigned int min_num_stack_tx_queues;
+
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
@@ -355,6 +477,14 @@ struct enetc_ndev_priv {
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
+
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
+
+ struct clk *ref_clk; /* RGMII/RMII reference clock */
+ u64 sysclk_freq; /* NETC system clock frequency */
};
/* Messaging */
@@ -369,10 +499,9 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
-/* PTP driver exports */
-extern int enetc_phc_index;
-
/* SI common */
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
@@ -382,6 +511,10 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
+int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -389,33 +522,97 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
+void enetc_reset_tc_mqprio(struct net_device *ndev);
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
+extern const struct ethtool_ops enetc_pf_ethtool_ops;
+extern const struct ethtool_ops enetc4_pf_ethtool_ops;
+extern const struct ethtool_ops enetc_vf_ethtool_ops;
+extern const struct ethtool_ops enetc4_ppm_ethtool_ops;
+
void enetc_set_ethtool_ops(struct net_device *ndev);
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
+
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
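
These two inline helpers are meant to bracket a single command submission.
A minimal usage sketch, mirroring how enetc_set_fs_entry() is reworked later
in this patch (the payload buffer and its size are hypothetical):

	struct enetc_cbd cbd = {.cmd = 0};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp_align, payload, size);	/* hypothetical payload */
	err = enetc_send_cmd(si, &cbd);
	enetc_cbd_free_data_mem(si, size, tmp, &dma);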
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
+static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
+
+ return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
+}
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
@@ -425,22 +622,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -481,6 +680,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
@@ -500,4 +700,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
new file mode 100644
index 000000000000..3ed0f7a02767
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * This header file defines the register offsets and bit fields
+ * of the ENETC4 PF and VFs. Note that registers unchanged from ENETC
+ * version 1.0 are defined in the enetc_hw.h file.
+ *
+ * Copyright 2024 NXP
+ */
+#ifndef __ENETC4_HW_H_
+#define __ENETC4_HW_H_
+
+#define NXP_ENETC_VENDOR_ID 0x1131
+#define NXP_ENETC_PF_DEV_ID 0xe101
+#define NXP_ENETC_PPM_DEV_ID 0xe110
+
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* According to tso_build_hdr(), clear all special flags on all but the last packet. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
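
These masks are composed with SILSOSFMR0_VAL_SET() and programmed into the
register. A hedged sketch, assuming the two fields act as clear-masks for the
first and middle segments (suggested by the DMASK name and the
tso_build_hdr() note above, but not stated here):

	/* Assumption: clear FIN/RST/PSH on every non-last LSO segment */
	u32 val = SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
				     ENETC4_TCP_NL_SEG_FLAGS_DMASK);

	enetc_wr(hw, ENETC4_SILSOSFMR0, val);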
+
+/***************************ENETC port registers**************************/
+#define ENETC4_ECAPR0 0x0
+#define ECAPR0_RFS BIT(2)
+#define ECAPR0_TSD BIT(5)
+#define ECAPR0_RSS BIT(8)
+#define ECAPR0_RSC BIT(9)
+#define ECAPR0_LSO BIT(10)
+#define ECAPR0_WO BIT(13)
+
+#define ENETC4_ECAPR1 0x4
+#define ECAPR1_NUM_TCS GENMASK(6, 4)
+#define ECAPR1_NUM_MCH GENMASK(9, 8)
+#define ECAPR1_NUM_UCH GENMASK(11, 10)
+#define ECAPR1_NUM_MSIX GENMASK(22, 12)
+#define ECAPR1_NUM_VSI GENMASK(27, 24)
+#define ECAPR1_NUM_IPV BIT(31)
+
+#define ENETC4_ECAPR2 0x8
+#define ECAPR2_NUM_TX_BDR GENMASK(9, 0)
+#define ECAPR2_NUM_RX_BDR GENMASK(25, 16)
+
+#define ENETC4_PMR 0x10
+#define PMR_SI_EN(a) BIT((16 + (a)))
+
+/* Port Pause ON/OFF threshold register */
+#define ENETC4_PPAUONTR 0x108
+#define ENETC4_PPAUOFFTR 0x10c
+
+/* Port Station interface promiscuous MAC mode register */
+#define ENETC4_PSIPMMR 0x200
+#define PSIPMMR_SI_MAC_UP(a) BIT(a) /* a = SI index */
+#define PSIPMMR_SI_MAC_MP(a) BIT((a) + 16)
+
+/* Port Station interface promiscuous VLAN mode register */
+#define ENETC4_PSIPVMR 0x204
+
+/* Port RSS key register n. n = 0,1,2,...,9 */
+#define ENETC4_PRSSKR(n) ((n) * 0x4 + 0x250)
+
+/* Port station interface MAC address filtering capability register */
+#define ENETC4_PSIMAFCAPR 0x280
+#define PSIMAFCAPR_NUM_MAC_AFTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering capability register */
+#define ENETC4_PSIVLANFCAPR 0x2c0
+#define PSIVLANFCAPR_NUM_VLAN_FTE GENMASK(11, 0)
+
+/* Port station interface VLAN filtering mode register */
+#define ENETC4_PSIVLANFMR 0x2c4
+#define PSIVLANFMR_VS BIT(0)
+
+/* Port station interface a primary MAC address register 0/1 */
+#define ENETC4_PSIPMAR0(a) ((a) * 0x80 + 0x2000)
+#define ENETC4_PSIPMAR1(a) ((a) * 0x80 + 0x2004)
+
+/* Port station interface a configuration register 0/2 */
+#define ENETC4_PSICFGR0(a) ((a) * 0x80 + 0x2010)
+#define PSICFGR0_VASE BIT(13)
+#define PSICFGR0_ASE BIT(15)
+#define PSICFGR0_ANTI_SPOOFING (PSICFGR0_VASE | PSICFGR0_ASE)
+
+#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
+#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
+#define ENETC4_PMCAPR 0x4004
+#define PMCAPR_HD BIT(8)
+#define PMCAPR_FP GENMASK(10, 9)
+
+/* Port capability register */
+#define ENETC4_PCAPR 0x4000
+#define PCAPR_LINK_TYPE BIT(4)
+
+/* Port configuration register */
+#define ENETC4_PCR 0x4010
+#define PCR_HDR_FMT BIT(0)
+#define PCR_L2DOSE BIT(4)
+#define PCR_TIMER_CS BIT(8)
+#define PCR_PSPEED GENMASK(29, 16)
+#define PCR_PSPEED_VAL(speed) (((speed) / 10 - 1) << 16)
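
The encoding divides the speed by ten and subtracts one before shifting into
bits 29:16: for example, PCR_PSPEED_VAL(SPEED_1000) places
(1000 / 10 - 1) = 99 = 0x63 in the field, while PCR_PSPEED_VAL(SPEED_10)
places 0.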
+
+/* Port MAC address register 0/1 */
+#define ENETC4_PMAR0 0x4020
+#define ENETC4_PMAR1 0x4024
+
+/* Port operational register */
+#define ENETC4_POR 0x4100
+
+/* Port traffic class a transmit maximum SDU register */
+#define ENETC4_PTCTMSDUR(a) ((a) * 0x20 + 0x4208)
+#define PTCTMSDUR_MAXSDU GENMASK(15, 0)
+#define PTCTMSDUR_SDU_TYPE GENMASK(17, 16)
+#define SDU_TYPE_PPDU 0
+#define SDU_TYPE_MPDU 1
+#define SDU_TYPE_MSDU 2
+
+#define ENETC4_PMAC_OFFSET 0x400
+#define ENETC4_PM_CMD_CFG(mac) (0x5008 + (mac) * 0x400)
+#define PM_CMD_CFG_TX_EN BIT(0)
+#define PM_CMD_CFG_RX_EN BIT(1)
+#define PM_CMD_CFG_PAUSE_FWD BIT(7)
+#define PM_CMD_CFG_PAUSE_IGN BIT(8)
+#define PM_CMD_CFG_TX_ADDR_INS BIT(9)
+#define PM_CMD_CFG_LOOP_EN BIT(10)
+#define PM_CMD_CFG_LPBK_MODE GENMASK(12, 11)
+#define LPBCK_MODE_EXT_TX_CLK 0
+#define LPBCK_MODE_MAC_LEVEL 1
+#define LPBCK_MODE_INT_TX_CLK 2
+#define PM_CMD_CFG_CNT_FRM_EN BIT(13)
+#define PM_CMD_CFG_TXP BIT(15)
+#define PM_CMD_CFG_SEND_IDLE BIT(16)
+#define PM_CMD_CFG_HD_FCEN BIT(18)
+#define PM_CMD_CFG_SFD BIT(21)
+#define PM_CMD_CFG_TX_FLUSH BIT(22)
+#define PM_CMD_CFG_TX_LOWP_EN BIT(23)
+#define PM_CMD_CFG_RX_LOWP_EMPTY BIT(24)
+#define PM_CMD_CFG_SWR BIT(26)
+#define PM_CMD_CFG_TS_MODE BIT(30)
+#define PM_CMD_CFG_MG BIT(31)
+
+/* Port MAC 0/1 Maximum Frame Length Register */
+#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+
+/* Port internal MDIO base address, used to access the PCS */
+#define ENETC4_PM_IMDIO_BASE 0x5030
+
+/* Port MAC 0/1 Pause Quanta Register */
+#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
+
+/* Port MAC 0/1 Pause Quanta Threshold Register */
+#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+
+#define ENETC4_PM_SINGLE_STEP(mac) (0x50c0 + (mac) * 0x400)
+#define PM_SINGLE_STEP_CH BIT(6)
+#define PM_SINGLE_STEP_OFFSET GENMASK(15, 7)
+#define PM_SINGLE_STEP_OFFSET_SET(o) FIELD_PREP(PM_SINGLE_STEP_OFFSET, o)
+#define PM_SINGLE_STEP_EN BIT(31)
+
+/* Port MAC 0/1 Interface Mode Control Register */
+#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
+#define PM_IF_MODE_IFMODE GENMASK(2, 0)
+#define IFMODE_XGMII 0
+#define IFMODE_RMII 3
+#define IFMODE_RGMII 4
+#define IFMODE_SGMII 5
+#define PM_IF_MODE_REVMII BIT(3)
+#define PM_IF_MODE_M10 BIT(4)
+#define PM_IF_MODE_HD BIT(6)
+#define PM_IF_MODE_SSP GENMASK(14, 13)
+#define SSP_100M 0
+#define SSP_10M 1
+#define SSP_1G 2
+#define PM_IF_MODE_ENA BIT(15)
+
+/* Port external MDIO base address, used to access the off-chip PHY */
+#define ENETC4_EMDIO_BASE 0x5c00
+
+/**********************ENETC Pseudo MAC port registers************************/
+/* Port pseudo MAC receive octets counter (64-bit) */
+#define ENETC4_PPMROCR 0x5080
+
+/* Port pseudo MAC receive unicast frame counter register (64-bit) */
+#define ENETC4_PPMRUFCR 0x5088
+
+/* Port pseudo MAC receive multicast frame counter register (64-bit) */
+#define ENETC4_PPMRMFCR 0x5090
+
+/* Port pseudo MAC receive broadcast frame counter register (64-bit) */
+#define ENETC4_PPMRBFCR 0x5098
+
+/* Port pseudo MAC transmit octets counter (64-bit) */
+#define ENETC4_PPMTOCR 0x50c0
+
+/* Port pseudo MAC transmit unicast frame counter register (64-bit) */
+#define ENETC4_PPMTUFCR 0x50c8
+
+/* Port pseudo MAC transmit multicast frame counter register (64-bit) */
+#define ENETC4_PPMTMFCR 0x50d0
+
+/* Port pseudo MAC transmit broadcast frame counter register (64-bit) */
+#define ENETC4_PPMTBFCR 0x50d8
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
new file mode 100644
index 000000000000..498346dd996a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/unaligned.h>
+
+#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
+
+#define ENETC_SI_MAX_RING_NUM 8
+
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
+static void enetc4_get_port_caps(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR1);
+ pf->caps.num_vsi = (val & ECAPR1_NUM_VSI) >> 24;
+ pf->caps.num_msix = ((val & ECAPR1_NUM_MSIX) >> 12) + 1;
+
+ val = enetc_port_rd(hw, ENETC4_ECAPR2);
+ pf->caps.num_rx_bdr = (val & ECAPR2_NUM_RX_BDR) >> 16;
+ pf->caps.num_tx_bdr = val & ECAPR2_NUM_TX_BDR;
+
+ val = enetc_port_rd(hw, ENETC4_PMCAPR);
+ pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
+}
+
+static void enetc4_get_psi_hw_features(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC4_PCAPR);
+ if (val & PCAPR_LINK_TYPE)
+ si->hw_features |= ENETC_SI_F_PPM;
+}
+
+static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
+ const u8 *addr)
+{
+ u16 lower = get_unaligned_le16(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+
+ if (si != 0) {
+ __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
+ __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
+ } else {
+ __raw_writel(upper, hw->port + ENETC4_PMAR0);
+ __raw_writew(lower, hw->port + ENETC4_PMAR1);
+ }
+}
+
+static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
+ u8 *addr)
+{
+ u32 upper;
+ u16 lower;
+
+ upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
+ lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
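
To make the byte packing concrete: for the MAC address 00:11:22:33:44:55,
get_unaligned_le32(addr) yields 0x33221100 (the "upper" word) and
get_unaligned_le16(addr + 4) yields 0x5544 (the "lower" half-word); the read
path above reverses this with the matching put_unaligned_le*() calls.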
+
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Default to MAC-level loopback mode when loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Install a temporary unicast hash filter to avoid RX loss while
+ * the MAC address filter table is being updated.
+ */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+ /* The MAC address filter table (MAFT) currently has only 4 entries,
+ * while a default network configuration typically installs several
+ * multicast addresses, so the MAFT is reserved for unicast filtering.
+ * If the number of unicast addresses exceeds the table capacity, the
+ * driver falls back to the MAC hash filter.
+ */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
+static const struct enetc_pf_ops enetc4_pf_ops = {
+ .set_si_primary_mac = enetc4_pf_set_si_primary_mac,
+ .get_si_primary_mac = enetc4_pf_get_si_primary_mac,
+};
+
+static int enetc4_pf_struct_init(struct enetc_si *si)
+{
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ pf->si = si;
+ pf->total_vfs = pci_sriov_get_totalvfs(si->pdev);
+ pf->ops = &enetc4_pf_ops;
+
+ enetc4_get_port_caps(pf);
+ enetc4_get_psi_hw_features(si);
+
+ return 0;
+}
+
+static u32 enetc4_psicfgr0_val_construct(bool is_vf, u32 num_tx_bdr, u32 num_rx_bdr)
+{
+ u32 val;
+
+ val = ENETC_PSICFGR0_SET_TXBDR(num_tx_bdr);
+ val |= ENETC_PSICFGR0_SET_RXBDR(num_rx_bdr);
+ val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
+
+ if (is_vf)
+ val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE;
+
+ return val;
+}
+
+static void enetc4_default_rings_allocation(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 num_rx_bdr, num_tx_bdr, val;
+ u32 vf_tx_bdr, vf_rx_bdr;
+ int i, rx_rem, tx_rem;
+
+ if (pf->caps.num_rx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_rx_bdr = pf->caps.num_rx_bdr - pf->caps.num_vsi;
+ else
+ num_rx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ if (pf->caps.num_tx_bdr < ENETC_SI_MAX_RING_NUM + pf->caps.num_vsi)
+ num_tx_bdr = pf->caps.num_tx_bdr - pf->caps.num_vsi;
+ else
+ num_tx_bdr = ENETC_SI_MAX_RING_NUM;
+
+ val = enetc4_psicfgr0_val_construct(false, num_tx_bdr, num_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(0), val);
+
+ num_rx_bdr = pf->caps.num_rx_bdr - num_rx_bdr;
+ rx_rem = num_rx_bdr % pf->caps.num_vsi;
+ num_rx_bdr = num_rx_bdr / pf->caps.num_vsi;
+
+ num_tx_bdr = pf->caps.num_tx_bdr - num_tx_bdr;
+ tx_rem = num_tx_bdr % pf->caps.num_vsi;
+ num_tx_bdr = num_tx_bdr / pf->caps.num_vsi;
+
+ for (i = 0; i < pf->caps.num_vsi; i++) {
+ vf_tx_bdr = (i < tx_rem) ? num_tx_bdr + 1 : num_tx_bdr;
+ vf_rx_bdr = (i < rx_rem) ? num_rx_bdr + 1 : num_rx_bdr;
+ val = enetc4_psicfgr0_val_construct(true, vf_tx_bdr, vf_rx_bdr);
+ enetc_port_wr(hw, ENETC4_PSICFGR0(i + 1), val);
+ }
+}
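
A worked example with hypothetical capabilities: for caps.num_tx_bdr = 18 and
caps.num_vsi = 4, 18 >= 8 + 4 holds, so the PF keeps
ENETC_SI_MAX_RING_NUM = 8 TX rings; the remaining 10 divide as 10 / 4 = 2 per
VF with remainder 2, so VF0 and VF1 are assigned 3 rings each and VF2 and VF3
get 2 each.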
+
+static void enetc4_allocate_si_rings(struct enetc_pf *pf)
+{
+ enetc4_default_rings_allocation(pf);
+}
+
+static void enetc4_pf_set_si_vlan_promisc(struct enetc_hw *hw, int si, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPVMR);
+
+ if (en)
+ val |= BIT(si);
+ else
+ val &= ~BIT(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPVMR, val);
+}
+
+static void enetc4_set_default_si_vlan_promisc(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ int i;
+
+ /* enforce VLAN promiscuous mode for all SIs */
+ for (i = 0; i < num_si; i++)
+ enetc4_pf_set_si_vlan_promisc(hw, i, true);
+}
+
+/* Distribute the available MSI-X vectors among the SIs. */
+static void enetc4_set_si_msix_num(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int i, num_msix, total_si;
+ u32 val;
+
+ total_si = pf->caps.num_vsi + 1;
+
+ num_msix = pf->caps.num_msix / total_si +
+ pf->caps.num_msix % total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ enetc_port_wr(hw, ENETC4_PSICFGR2(0), val);
+
+ num_msix = pf->caps.num_msix / total_si - 1;
+ val = num_msix & PSICFGR2_NUM_MSIX;
+ for (i = 0; i < pf->caps.num_vsi; i++)
+ enetc_port_wr(hw, ENETC4_PSICFGR2(i + 1), val);
+}
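
A worked example with hypothetical capabilities, caps.num_msix = 30 and
caps.num_vsi = 3 (total_si = 4): the PF field is programmed with
30 / 4 + 30 % 4 - 1 = 8 and each VF field with 30 / 4 - 1 = 6. Assuming
PSICFGR2_NUM_MSIX uses the same minus-one encoding implied by the
ECAPR1_NUM_MSIX read in enetc4_get_port_caps(), this provisions
9 + 3 * 7 = 30 vectors, exactly the available budget.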
+
+static void enetc4_enable_all_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+ int num_si = pf->caps.num_vsi + 1;
+ u32 si_bitmap = 0;
+ int i;
+
+ /* Master enable for all SIs */
+ for (i = 0; i < num_si; i++)
+ si_bitmap |= PMR_SI_EN(i);
+
+ enetc_port_wr(hw, ENETC4_PMR, si_bitmap);
+}
+
+static void enetc4_configure_port_si(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ enetc4_allocate_si_rings(pf);
+
+ /* Outer VLAN tag will be used for VLAN filtering */
+ enetc_port_wr(hw, ENETC4_PSIVLANFMR, PSIVLANFMR_VS);
+
+ enetc4_set_default_si_vlan_promisc(pf);
+
+ /* Disable SI MAC multicast & unicast promiscuous */
+ enetc_port_wr(hw, ENETC4_PSIPMMR, 0);
+
+ enetc4_set_si_msix_num(pf);
+
+ enetc4_enable_all_si(pf);
+}
+
+static void enetc4_pf_reset_tc_msdu(struct enetc_hw *hw)
+{
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+ int tc;
+
+ val = u32_replace_bits(val, SDU_TYPE_MPDU, PTCTMSDUR_SDU_TYPE);
+
+ for (tc = 0; tc < ENETC_NUM_TC; tc++)
+ enetc_port_wr(hw, ENETC4_PTCTMSDUR(tc), val);
+}
+
+static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
+{
+ struct enetc_si *si = pf->si;
+
+ enetc_port_mac_wr(si, ENETC4_PM_MAXFRM(0),
+ ENETC_SET_MAXFRM(ENETC_MAC_MAXFRM_SIZE));
+
+ enetc4_pf_reset_tc_msdu(&si->hw);
+}
+
+static void enetc4_enable_trx(struct enetc_pf *pf)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ /* Enable port transmit/receive */
+ enetc_port_wr(hw, ENETC4_POR, 0);
+}
+
+static void enetc4_configure_port(struct enetc_pf *pf)
+{
+ enetc4_configure_port_si(pf);
+ enetc4_set_trx_frame_size(pf);
+ enetc_set_default_rss_key(pf);
+ enetc4_enable_trx(pf);
+}
+
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
+static int enetc4_pf_init(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ int err;
+
+ /* Initialize the MAC address for PF and VFs */
+ err = enetc_setup_mac_addresses(dev->of_node, pf);
+ if (err) {
+ dev_err(dev, "Failed to set MAC addresses\n");
+ return err;
+ }
+
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
+ enetc4_configure_port(pf);
+
+ return 0;
+}
+
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static const struct net_device_ops enetc4_ndev_ops = {
+ .ndo_open = enetc_open,
+ .ndo_stop = enetc_close,
+ .ndo_start_xmit = enetc_xmit,
+ .ndo_get_stats = enetc_get_stats,
+ .ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
+};
+
+static struct phylink_pcs *
+enetc4_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ return pf->pcs;
+}
+
+static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode,
+ phy_interface_t phy_mode)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(pf->si->ndev);
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ if (enetc_is_pseudo_mac(si))
+ return;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= IFMODE_RGMII;
+ /* We need to enable auto-negotiation for the MAC
+ * if its RGMII interface supports in-band status.
+ */
+ if (phylink_autoneg_inband(mode))
+ val |= PM_IF_MODE_ENA;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val |= IFMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ val |= IFMODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ val |= IFMODE_XGMII;
+ break;
+ default:
+ dev_err(priv->dev,
+ "Unsupported PHY mode:%d\n", phy_mode);
+ return;
+ }
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_pl_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_mac_config(pf, mode, state->interface);
+}
+
+static void enetc4_set_port_speed(struct enetc_ndev_priv *priv, int speed)
+{
+ u32 old_speed = priv->speed;
+ u32 val;
+
+ if (speed == old_speed)
+ return;
+
+ val = enetc_port_rd(&priv->si->hw, ENETC4_PCR);
+ val &= ~PCR_PSPEED;
+
+ switch (speed) {
+ case SPEED_100:
+ case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(speed));
+ break;
+ case SPEED_10:
+ default:
+ val |= (PCR_PSPEED & PCR_PSPEED_VAL(SPEED_10));
+ }
+
+ priv->speed = speed;
+ enetc_port_wr(&priv->si->hw, ENETC4_PCR, val);
+}
+
+static void enetc4_set_rgmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_M10 | PM_IF_MODE_REVMII);
+
+ switch (speed) {
+ case SPEED_1000:
+ val = u32_replace_bits(val, SSP_1G, PM_IF_MODE_SSP);
+ break;
+ case SPEED_100:
+ val = u32_replace_bits(val, SSP_100M, PM_IF_MODE_SSP);
+ break;
+ case SPEED_10:
+ val = u32_replace_bits(val, SSP_10M, PM_IF_MODE_SSP);
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_rmii_mac(struct enetc_pf *pf, int speed, int duplex)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0));
+ val = old_val & ~(PM_IF_MODE_ENA | PM_IF_MODE_SSP);
+
+ switch (speed) {
+ case SPEED_100:
+ val &= ~PM_IF_MODE_M10;
+ break;
+ case SPEED_10:
+ val |= PM_IF_MODE_M10;
+ }
+
+ val = u32_replace_bits(val, duplex == DUPLEX_FULL ? 0 : 1,
+ PM_IF_MODE_HD);
+
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_IF_MODE(0), val);
+}
+
+static void enetc4_set_hd_flow_control(struct enetc_pf *pf, bool enable)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ if (!pf->caps.half_duplex)
+ return;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, enable ? 1 : 0, PM_CMD_CFG_HD_FCEN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_rx_pause(struct enetc_pf *pf, bool rx_pause)
+{
+ struct enetc_si *si = pf->si;
+ u32 old_val, val;
+
+ old_val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(old_val, rx_pause ? 0 : 1, PM_CMD_CFG_PAUSE_IGN);
+ if (val == old_val)
+ return;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_set_tx_pause(struct enetc_pf *pf, int num_rxbdr, bool tx_pause)
+{
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
+ u32 rbmr, old_rbmr;
+ int i;
+
+ for (i = 0; i < num_rxbdr; i++) {
+ old_rbmr = enetc_rxbdr_rd(hw, i, ENETC_RBMR);
+ rbmr = u32_replace_bits(old_rbmr, tx_pause ? 1 : 0, ENETC_RBMR_CM);
+ if (rbmr == old_rbmr)
+ continue;
+
+ enetc_rxbdr_wr(hw, i, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_QUANTA(0), init_quanta);
+ enetc_port_mac_wr(pf->si, ENETC4_PM_PAUSE_THRESH(0), refresh_quanta);
+ enetc_port_wr(hw, ENETC4_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC4_PPAUOFFTR, pause_off_thresh);
+}
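
For a sense of scale: a PAUSE quantum is 512 bit times, so the maximum request
of 0xffff quanta asks the link partner to pause for 65535 * 512 ≈ 33.6 million
bit times, about 33.6 ms at 1 Gb/s, and the 0xffff / 2 refresh threshold
re-arms the request at roughly half that interval while congestion persists.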
+
+static void enetc4_enable_mac(struct enetc_pf *pf, bool en)
+{
+ struct enetc_si *si = pf->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val &= ~(PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN);
+ val |= en ? (PM_CMD_CFG_TX_EN | PM_CMD_CFG_RX_EN) : 0;
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+ bool hd_fc = false;
+
+ priv = netdev_priv(si->ndev);
+ enetc4_set_port_speed(priv, speed);
+
+ if (!phylink_autoneg_inband(mode) &&
+ phy_interface_mode_is_rgmii(interface))
+ enetc4_set_rgmii_mac(pf, speed, duplex);
+
+ if (interface == PHY_INTERFACE_MODE_RMII)
+ enetc4_set_rmii_mac(pf, speed, duplex);
+
+ if (duplex == DUPLEX_FULL) {
+ /* When preemption is enabled, generation of PAUSE frames
+ * must be disabled, as stated in the IEEE 802.3 standard.
+ */
+ if (priv->active_offloads & ENETC_F_QBU)
+ tx_pause = false;
+ } else { /* DUPLEX_HALF */
+ if (tx_pause || rx_pause)
+ hd_fc = true;
+
+ /* As per 802.3 annex 31B, PAUSE frames are only supported
+ * when the link is configured for full duplex operation.
+ */
+ tx_pause = false;
+ rx_pause = false;
+ }
+
+ enetc4_set_hd_flow_control(pf, hd_fc);
+ enetc4_set_tx_pause(pf, priv->num_rx_rings, tx_pause);
+ enetc4_set_rx_pause(pf, rx_pause);
+ enetc4_enable_mac(pf, true);
+}
+
+static void enetc4_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc4_enable_mac(pf, false);
+}
+
+static const struct phylink_mac_ops enetc_pl_mac_ops = {
+ .mac_select_pcs = enetc4_pl_mac_select_pcs,
+ .mac_config = enetc4_pl_mac_config,
+ .mac_link_up = enetc4_pl_mac_link_up,
+ .mac_link_down = enetc4_pl_mac_link_down,
+};
+
+static void enetc4_pci_remove(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ enetc_pci_remove(pdev);
+}
+
+static int enetc4_link_init(struct enetc_ndev_priv *priv,
+ struct device_node *node)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = priv->dev;
+ int err;
+
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(dev, "Failed to get PHY mode\n");
+ return err;
+ }
+
+ err = enetc_mdiobus_create(pf, node);
+ if (err) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return err;
+ }
+
+ err = enetc_phylink_create(priv, node, &enetc_pl_mac_ops);
+ if (err) {
+ dev_err(dev, "Failed to create phylink\n");
+ goto err_phylink_create;
+ }
+
+ return 0;
+
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+
+ return err;
+}
+
+static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+}
+
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int enetc4_pf_netdev_create(struct enetc_si *si)
+{
+ struct device *dev = &si->pdev->dev;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct enetc_ndev_priv),
+ si->num_tx_rings, si->num_rx_rings);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ref_clk = devm_clk_get_optional(dev, "ref");
+ if (IS_ERR(priv->ref_clk)) {
+ dev_err(dev, "Get reference clock failed\n");
+ err = PTR_ERR(priv->ref_clk);
+ goto err_clk_get;
+ }
+
+ enetc_pf_netdev_setup(si, ndev, &enetc4_ndev_ops);
+
+ enetc_init_si_rings_params(priv);
+
+ err = enetc_configure_si(priv);
+ if (err) {
+ dev_err(dev, "Failed to configure SI\n");
+ goto err_config_si;
+ }
+
+ err = enetc_alloc_msix(priv);
+ if (err) {
+ dev_err(dev, "Failed to alloc MSI-X\n");
+ goto err_alloc_msix;
+ }
+
+ err = enetc4_link_init(priv, dev->of_node);
+ if (err)
+ goto err_link_init;
+
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
+ err = register_netdev(ndev);
+ if (err) {
+ dev_err(dev, "Failed to register netdev\n");
+ goto err_reg_netdev;
+ }
+
+ return 0;
+
+err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
+ enetc4_link_deinit(priv);
+err_link_init:
+ enetc_free_msix(priv);
+err_alloc_msix:
+err_config_si:
+err_clk_get:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_netdev_destroy(struct enetc_si *si)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
+ struct net_device *ndev = si->ndev;
+
+ unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
+ enetc4_link_deinit(priv);
+ enetc_free_msix(priv);
+ free_netdev(ndev);
+}
+
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
+static int enetc4_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
+ if (err)
+ return dev_err_probe(dev, err, "PCIe probing failed\n");
+
+ err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
+ if (err)
+ return err;
+
+ /* si is the private data. */
+ si = pci_get_drvdata(pdev);
+ if (!si->hw.port || !si->hw.global)
+ return dev_err_probe(dev, -ENODEV,
+ "Couldn't map PF only space\n");
+
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Could not get PF driver data\n");
+
+ err = enetc4_pf_struct_init(si);
+ if (err)
+ return err;
+
+ pf = enetc_si_priv(si);
+ err = enetc4_pf_init(pf);
+ if (err)
+ return err;
+
+ enetc_get_si_caps(si);
+
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
+}
+
+static void enetc4_pf_remove(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ enetc_remove_debugfs(si);
+ enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
+}
+
+static const struct pci_device_id enetc4_pf_id_table[] = {
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) },
+ { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PPM_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table);
+
+static struct pci_driver enetc4_pf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc4_pf_id_table,
+ .probe = enetc4_pf_probe,
+ .remove = enetc4_pf_remove,
+};
+module_pci_driver(enetc4_pf_driver);
+
+MODULE_DESCRIPTION("ENETC4 PF Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 073e56dcca4e..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -44,6 +44,7 @@ int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
@@ -57,6 +58,45 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
cbdr->bd_base = NULL;
cbdr->dma_dev = NULL;
}
+EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+ return -ENOMEM;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
@@ -127,6 +167,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_send_cmd);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
@@ -140,6 +181,7 @@ int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map)
@@ -165,71 +207,58 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
-#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
+ dma_addr_t dma;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
- cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RFSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
-
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- tmp, dma);
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_fs_entry);
-#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
+ dma_addr_t dma;
int err, i;
- if (count < RSSE_ALIGN)
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RSSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
@@ -238,10 +267,6 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
- cbd.length = cpu_to_le16(count);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
@@ -251,7 +276,7 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
return err;
}
@@ -261,9 +286,23 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
return enetc_cmd_rss_table(si, table, count, true);
}
+EXPORT_SYMBOL_GPL(enetc_get_rss_table);
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 910b9f722504..fed89d4f1e1d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -1,8 +1,12 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+
#include "enetc.h"
static const u32 enetc_si_regs[] = {
@@ -31,6 +35,12 @@ static const u32 enetc_port_regs[] = {
ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
};
+static const u32 enetc_port_mm_regs[] = {
+ ENETC_MMCSR, ENETC_PFPMR, ENETC_PTCFPR(0), ENETC_PTCFPR(1),
+ ENETC_PTCFPR(2), ENETC_PTCFPR(3), ENETC_PTCFPR(4), ENETC_PTCFPR(5),
+ ENETC_PTCFPR(6), ENETC_PTCFPR(7),
+};
+
static int enetc_get_reglen(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -44,6 +54,9 @@ static int enetc_get_reglen(struct net_device *ndev)
if (hw->port)
len += ARRAY_SIZE(enetc_port_regs);
+ if (hw->port && !!(priv->si->hw_features & ENETC_SI_F_QBU))
+ len += ARRAY_SIZE(enetc_port_mm_regs);
+
len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
return len;
@@ -89,6 +102,14 @@ static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
+
+ if (priv->si->hw_features & ENETC_SI_F_QBU) {
+ for (i = 0; i < ARRAY_SIZE(enetc_port_mm_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_mm_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
}
static const struct {
@@ -123,70 +144,76 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
+ char name[ETH_GSTRING_LEN] __nonstring;
+} enetc_pm_counters[] = {
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+};
+
+static const struct {
+ int reg;
+ char name[ETH_GSTRING_LEN] __nonstring;
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -197,13 +224,13 @@ static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d recycle failures",
"Rx ring %2d redirects",
"Rx ring %2d redirect failures",
- "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
"Tx ring %2d XDP frames",
"Tx ring %2d XDP drops",
+ "Tx window drop %2d frames",
};
static int enetc_get_sset_count(struct net_device *ndev, int sset)
@@ -222,6 +249,7 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
return len;
len += ARRAY_SIZE(enetc_port_counters);
+ len += ARRAY_SIZE(enetc_pm_counters);
return len;
}
@@ -229,38 +257,28 @@ static int enetc_get_sset_count(struct net_device *ndev, int sset)
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- u8 *p = data;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->num_tx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < priv->num_rx_rings; i++) {
- for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) {
- snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j],
- i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
+ ethtool_puts(&data, enetc_si_counters[i].name);
+ for (i = 0; i < priv->num_tx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++)
+ ethtool_sprintf(&data, tx_ring_stats[j], i);
+ for (i = 0; i < priv->num_rx_rings; i++)
+ for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++)
+ ethtool_sprintf(&data, rx_ring_stats[j], i);
if (!enetc_si_is_pf(priv->si))
break;
- for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
+ ethtool_cpy(&data, enetc_port_counters[i].name);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ ethtool_cpy(&data, enetc_pm_counters[i].name);
+
break;
}
}
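
ethtool_puts() and ethtool_sprintf() copy one ETH_GSTRING_LEN-sized entry and advance the cursor internally, which is what lets the manual 'p += ETH_GSTRING_LEN' bookkeeping above be dropped. A condensed sketch of the pattern (illustrative, kernel context assumed):

static void example_fill_strings(u8 **data)
{
	/* fixed string, copied into one ETH_GSTRING_LEN slot */
	ethtool_puts(data, "example counter");
	/* formatted string, same slot size, printf-style args */
	ethtool_sprintf(data, "Rx ring %2d frames", 0);
}
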
@@ -279,6 +297,7 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->tx_ring[i]->stats.packets;
data[o++] = priv->tx_ring[i]->stats.xdp_tx;
data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ data[o++] = priv->tx_ring[i]->stats.win_drop;
}
for (i = 0; i < priv->num_rx_rings; i++) {
@@ -289,7 +308,6 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->rx_ring[i]->stats.recycle_failures;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
- data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -297,12 +315,218 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
+
+ for (i = 0; i < ARRAY_SIZE(enetc_pm_counters); i++)
+ data[o++] = enetc_port_rd64(hw, enetc_pm_counters[i].reg);
+}
+
+static void enetc_pause_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_pause_stats *pause_stats)
+{
+ pause_stats->tx_pause_frames = enetc_port_rd64(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd64(hw, ENETC_PM_RXPF(mac));
+}
+
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_pause_stats(hw, 0, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_pause_stats(hw, 1, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_pause_stats(ndev, pause_stats);
+ break;
+ }
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd64(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd64(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd64(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd64(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd64(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd64(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd64(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd64(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd64(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd64(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd64(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd64(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd64(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s)
+{
+ s->undersize_pkts = enetc_port_rd64(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd64(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd64(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd64(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd64(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd64(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd64(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd64(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd64(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd64(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd64(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd64(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd64(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd64(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd64(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd64(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd64(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd64(hw, ENETC_PM_T1523X(mac));
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_mac_stats(hw, 0, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mac_stats(hw, 1, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
+}
+
+static void enetc_ppm_mac_stats(struct enetc_si *si,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct enetc_hw *hw = &si->hw;
+ u64 rufcr, rmfcr, rbfcr;
+ u64 tufcr, tmfcr, tbfcr;
+
+ rufcr = enetc_port_rd64(hw, ENETC4_PPMRUFCR);
+ rmfcr = enetc_port_rd64(hw, ENETC4_PPMRMFCR);
+ rbfcr = enetc_port_rd64(hw, ENETC4_PPMRBFCR);
+
+ tufcr = enetc_port_rd64(hw, ENETC4_PPMTUFCR);
+ tmfcr = enetc_port_rd64(hw, ENETC4_PPMTMFCR);
+ tbfcr = enetc_port_rd64(hw, ENETC4_PPMTBFCR);
+
+ s->FramesTransmittedOK = tufcr + tmfcr + tbfcr;
+ s->FramesReceivedOK = rufcr + rmfcr + rbfcr;
+ s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC4_PPMTOCR);
+ s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC4_PPMROCR);
+ s->MulticastFramesXmittedOK = tmfcr;
+ s->BroadcastFramesXmittedOK = tbfcr;
+ s->MulticastFramesReceivedOK = rmfcr;
+ s->BroadcastFramesReceivedOK = rbfcr;
+}
+
+static void enetc_ppm_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ppm_mac_stats(priv->si, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_ctrl_stats(hw, 1, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
+ break;
+ }
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ *ranges = enetc_rmon_ranges;
+
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_rmon_stats(hw, 0, rmon_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_rmon_stats(hw, 1, rmon_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_rmon_stats(ndev, rmon_stats);
+ break;
+ }
}
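
The seven hist[]/hist_tx[] entries filled in enetc_rmon_stats() correspond positionally to the seven real entries of enetc_rmon_ranges[]; the trailing {} only terminates the list. A compile-time restatement of that invariant (illustrative, not in the driver):

static_assert(ARRAY_SIZE(enetc_rmon_ranges) == 7 + 1,
	      "one counter pair per advertised RMON range");
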
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -409,6 +633,13 @@ done:
return enetc_set_fs_entry(si, &rfse, fs->location);
}
+static u32 enetc_get_rx_ring_count(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ return priv->num_rx_rings;
+}
+
static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
@@ -416,12 +647,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
int i, j;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = priv->num_rx_rings;
- break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -512,57 +737,79 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
-static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, const u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
+static int enetc_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
- if (indir)
- err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_key);
-static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int enetc_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (key && hw->port)
- enetc_set_rss_key(hw, key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
- if (indir)
- err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
static void enetc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -591,9 +838,10 @@ static int enetc_get_coalesce(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_int_vector *v = priv->int_vector[0];
+ u64 clk_freq = priv->sysclk_freq;
- ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
- ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+ ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt, clk_freq);
+ ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt, clk_freq);
ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
@@ -609,12 +857,13 @@ static int enetc_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u64 clk_freq = priv->sysclk_freq;
u32 rx_ictt, tx_ictt;
int i, ic_mode;
bool changed;
- tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
- rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+ tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs, clk_freq);
+ rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs, clk_freq);
if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
return -EOPNOTSUPP;
@@ -656,34 +905,97 @@ static int enetc_set_coalesce(struct net_device *ndev,
return 0;
}
-static int enetc_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+static int enetc_get_phc_index_by_pdev(struct enetc_si *si)
{
- int *phc_idx;
-
- phc_idx = symbol_get(enetc_phc_index);
- if (phc_idx) {
- info->phc_index = *phc_idx;
- symbol_put(enetc_phc_index);
- } else {
- info->phc_index = -1;
+ struct pci_bus *bus = si->pdev->bus;
+ struct pci_dev *timer_pdev;
+ unsigned int devfn;
+ int phc_index;
+
+ switch (si->revision) {
+ case ENETC_REV_1_0:
+ devfn = PCI_DEVFN(0, 4);
+ break;
+ case ENETC_REV_4_1:
+ devfn = PCI_DEVFN(24, 0);
+ break;
+ case ENETC_REV_4_3:
+ devfn = PCI_DEVFN(0, 1);
+ break;
+ default:
+ return -1;
}
-#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ timer_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(bus),
+ bus->number, devfn);
+ if (!timer_pdev)
+ return -1;
+
+ phc_index = ptp_clock_index_by_dev(&timer_pdev->dev);
+ pci_dev_put(timer_pdev);
+
+ return phc_index;
+}
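
PCI_DEVFN() packs the slot and function as (slot << 3) | func, so the lookup above resolves to devfn 4 on rev 1.0 (device 0, function 4), 192 on rev 4.1 (device 24, function 0) and 1 on rev 4.3. The arithmetic, as a compile-time check (illustrative):

static_assert(PCI_DEVFN(0, 4) == 4 && PCI_DEVFN(24, 0) == 192 &&
	      PCI_DEVFN(0, 1) == 1, "fixed timer devfn per ENETC revision");
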
+
+static int enetc_get_phc_index(struct enetc_si *si)
+{
+ struct device_node *np = si->pdev->dev.of_node;
+ struct device_node *timer_np;
+ int phc_index;
+
+ if (!np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ phc_index = ptp_clock_index_by_of_node(timer_np);
+ of_node_put(timer_np);
+
+ return phc_index;
+}
+
+static void enetc_get_ts_generic_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
-#else
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-#endif
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ if (!enetc_ptp_clock_is_enabled(si))
+ goto timestamp_tx_sw;
+
+ info->phc_index = enetc_get_phc_index(si);
+ if (info->phc_index < 0)
+ goto timestamp_tx_sw;
+
+ enetc_get_ts_generic_info(ndev, info);
+
+ return 0;
+
+timestamp_tx_sw:
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
return 0;
}
@@ -750,7 +1062,250 @@ static int enetc_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
-static const struct ethtool_ops enetc_pf_ethtool_ops = {
+static void enetc_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+ s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+ s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+ s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+ s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+ s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ u32 lafs, rafs, val;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ switch (ENETC_MMCSR_GET_VSTS(val)) {
+ case 0:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ break;
+ case 2:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ break;
+ case 3:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ break;
+ case 4:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ break;
+ case 5:
+ default:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ break;
+ }
+
+ rafs = ENETC_MMCSR_GET_RAFS(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+ lafs = ENETC_MMCSR_GET_LAFS(val);
+ state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+ state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+ state->tx_active = state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
+ state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+ state->verify_time = ENETC_MMCSR_GET_VT(val);
+ /* A verifyTime of 128 ms would exceed the 7 bit width
+ * of the ENETC_MMCSR_VT field
+ */
+ state->max_verify_time = 127;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int enetc_mm_wait_tx_active(struct enetc_hw *hw, int verify_time)
+{
+ int timeout = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
+ u32 val;
+
+ /* This will time out after the standard value of 3 verification
+ * attempts. To not sleep forever, it relies on a non-zero verify_time,
+ * a guarantee provided by the ethtool nlattr policy.
+ */
+ return read_poll_timeout(enetc_port_rd, val,
+ ENETC_MMCSR_GET_VSTS(val) == 3,
+ ENETC_MM_VERIFY_SLEEP_US, timeout,
+ true, hw, ENETC_MMCSR);
+}
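
With a typical verifyTime of 10 ms, the poll budget works out to 10 * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES = 30000 us, sampled once per ENETC_MM_VERIFY_SLEEP_US (1 ms). The same arithmetic as a standalone helper (illustrative only, not driver code):

static inline unsigned long enetc_mm_verify_budget_us(unsigned int verify_time_ms)
{
	/* e.g. 10 ms * 1000 * 3 retries = 30000 us of polling budget */
	return verify_time_ms * 1000UL * 3;
}
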
+
+static void enetc_set_ptcfpr(struct enetc_hw *hw, u8 preemptible_tcs)
+{
+ u32 val;
+ int tc;
+
+ for (tc = 0; tc < 8; tc++) {
+ val = enetc_port_rd(hw, ENETC_PTCFPR(tc));
+
+ if (preemptible_tcs & BIT(tc))
+ val |= ENETC_PTCFPR_FPE;
+ else
+ val &= ~ENETC_PTCFPR_FPE;
+
+ enetc_port_wr(hw, ENETC_PTCFPR(tc), val);
+ }
+}
+
+/* ENETC does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u8 preemptible_tcs = 0;
+ u32 val;
+ int err;
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+ if (!(val & ENETC_MMCSR_ME))
+ goto out;
+
+ if (!(val & ENETC_MMCSR_VDIS)) {
+ err = enetc_mm_wait_tx_active(hw, ENETC_MMCSR_GET_VT(val));
+ if (err)
+ goto out;
+ }
+
+ preemptible_tcs = priv->preemptible_tcs;
+out:
+ enetc_set_ptcfpr(hw, preemptible_tcs);
+}
+
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+ u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+ if (val & ENETC_PM0_RX_EN)
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ if (cfg->pmac_enabled)
+ val |= ENETC_PFPMR_PMACE;
+ else
+ val &= ~ENETC_PFPMR_PMACE;
+ enetc_port_wr(hw, ENETC_PFPMR, val);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (cfg->verify_enabled)
+ val &= ~ENETC_MMCSR_VDIS;
+ else
+ val |= ENETC_MMCSR_VDIS;
+
+ if (cfg->tx_enabled)
+ priv->active_offloads |= ENETC_F_QBU;
+ else
+ priv->active_offloads &= ~ENETC_F_QBU;
+
+ /* If link is up, enable/disable MAC Merge right away */
+ if (!(val & ENETC_MMCSR_LINK_FAIL)) {
+ if (!!(priv->active_offloads & ENETC_F_QBU))
+ val |= ENETC_MMCSR_ME;
+ else
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ val &= ~ENETC_MMCSR_VT_MASK;
+ val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+ val &= ~ENETC_MMCSR_RAFS_MASK;
+ val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ enetc_restart_emac_rx(priv->si);
+
+ enetc_mm_commit_preemptible_tcs(priv);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and doesn't restart on its own after a new link up event.
+ * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
+ * should have been sufficient to re-trigger verification, but for ENETC it
+ * doesn't. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when link comes up.
+ */
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (link) {
+ val &= ~ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val |= ENETC_MMCSR_ME;
+ } else {
+ val |= ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ enetc_mm_commit_preemptible_tcs(priv);
+
+ mutex_unlock(&priv->mm_lock);
+}
+EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
+
+const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -759,12 +1314,18 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -776,9 +1337,31 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_mm = enetc_get_mm,
+ .set_mm = enetc_set_mm,
+ .get_mm_stats = enetc_get_mm_stats,
};
-static const struct ethtool_ops enetc_vf_ethtool_ops = {
+const struct ethtool_ops enetc4_ppm_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+};
+
+const struct ethtool_ops enetc_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -787,11 +1370,13 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -799,12 +1384,33 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_ts_info = enetc_get_ts_info,
};
+const struct ethtool_ops enetc4_pf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .get_ringparam = enetc_get_ringparam,
+ .get_coalesce = enetc_get_coalesce,
+ .set_coalesce = enetc_set_coalesce,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_wol = enetc_get_wol,
+ .set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
+ .get_rx_ring_count = enetc_get_rx_ring_count,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ts_info = enetc_get_ts_info,
+};
+
void enetc_set_ethtool_ops(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (enetc_si_is_pf(priv->si))
- ndev->ethtool_ops = &enetc_pf_ethtool_ops;
- else
- ndev->ethtool_ops = &enetc_vf_ethtool_ops;
+ ndev->ethtool_ops = priv->si->drvdata->eth_ops;
}
+EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1514e6a4a3ff..7b882b8921fe 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -3,6 +3,11 @@
#include <linux/bitops.h>
+#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
+#define ENETC_MM_VERIFY_RETRIES 3
+
+#define ENETC_NUM_TC 8
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -18,9 +23,9 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_QBV BIT(4)
-#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -38,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
@@ -187,6 +195,9 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff)
+#define ENETC_PCAPR0_PSFP BIT(9)
+#define ENETC_PCAPR0_QBV BIT(4)
+#define ENETC_PCAPR0_QBU BIT(3)
#define ENETC_PCAPR1 0x0904
#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */
#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff)
@@ -213,7 +224,6 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
-#define ENETC_PFPMR_MWLM BIT(0)
#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
@@ -222,11 +232,35 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
-#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_MMCSR_LINK_FAIL BIT(31)
+#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */
+#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK)
+#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23)
+#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */
+#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21)
+#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */
+#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18)
+#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */
+#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */
+#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */
+#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK)
+#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8)
+#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */
+#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3)
+#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */
+#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */
+#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */
+#define ENETC_MMFAECR 0x1f08
+#define ENETC_MMFSECR 0x1f0c
+#define ENETC_MMFAOCR 0x1f10
+#define ENETC_MMFCRXR 0x1f14
+#define ENETC_MMFCTXR 0x1f18
+#define ENETC_MMHCR 0x1f1c
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+#define ENETC_PMAC_OFFSET 0x1000
+
#define ENETC_PM0_CMD_CFG 0x8008
-#define ENETC_PM1_CMD_CFG 0x9008
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
@@ -245,11 +279,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_PAUSE_QUANTA 0x8054
#define ENETC_PM0_PAUSE_THRESH 0x8064
-#define ENETC_PM1_PAUSE_QUANTA 0x9054
-#define ENETC_PM1_PAUSE_THRESH 0x9064
#define ENETC_PM0_SINGLE_STEP 0x80c0
-#define ENETC_PM1_SINGLE_STEP 0x90c0
#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
@@ -276,58 +307,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + ENETC_PMAC_OFFSET * (mac))
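
As the comment heading the block says, the mac argument selects the register file: 0 addresses the eMAC and 1 the pMAC, one ENETC_PMAC_OFFSET (0x1000) higher. A quick compile-time illustration, assuming the macros above:

static_assert(ENETC_PM_TFRM(0) == 0x8220, "eMAC TX frame counter");
static_assert(ENETC_PM_TFRM(1) == 0x9220, "pMAC TX frame counter");
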
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -342,6 +375,11 @@ enum enetc_bdr_type {TX, RX};
/** Global regs, offset: 2_0000h */
#define ENETC_GLOBAL_BASE 0x20000
#define ENETC_G_EIPBRR0 0x0bf8
+#define EIPBRR0_REVISION GENMASK(15, 0)
+#define ENETC_REV_1_0 0x0100
+#define ENETC_REV_4_1 0x0401
+#define ENETC_REV_4_3 0x0403
+
#define ENETC_G_EIPBRR1 0x0bfc
#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n))
#define ENETC_G_EPFBLPR1_XGMII 0x80000000
@@ -370,18 +408,22 @@ struct enetc_hw {
*/
extern rwlock_t enetc_mdio_lock;
+DECLARE_STATIC_KEY_FALSE(enetc_has_err050089);
+
/* use this locking primitive only on the fast datapath to
* group together multiple non-MDIO register accesses to
* minimize the overhead of the lock
*/
static inline void enetc_lock_mdio(void)
{
- read_lock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_lock(&enetc_mdio_lock);
}
static inline void enetc_unlock_mdio(void)
{
- read_unlock(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ read_unlock(&enetc_mdio_lock);
}
/* use these accessors only on the fast datapath under
@@ -390,14 +432,16 @@ static inline void enetc_unlock_mdio(void)
*/
static inline u32 enetc_rd_reg_hot(void __iomem *reg)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
return ioread32(reg);
}
static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
{
- lockdep_assert_held(&enetc_mdio_lock);
+ if (static_branch_unlikely(&enetc_has_err050089))
+ lockdep_assert_held(&enetc_mdio_lock);
iowrite32(val, reg);
}
@@ -426,9 +470,13 @@ static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
unsigned long flags;
u32 val;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- val = ioread32(reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ val = ioread32(reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ val = ioread32(reg);
+ }
return val;
}
@@ -437,9 +485,13 @@ static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
{
unsigned long flags;
- write_lock_irqsave(&enetc_mdio_lock, flags);
- iowrite32(val, reg);
- write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ if (static_branch_unlikely(&enetc_has_err050089)) {
+ write_lock_irqsave(&enetc_mdio_lock, flags);
+ iowrite32(val, reg);
+ write_unlock_irqrestore(&enetc_mdio_lock, flags);
+ } else {
+ iowrite32(val, reg);
+ }
}
#ifdef ioread64
@@ -459,7 +511,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
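
The retry loop above guards against a torn read: the counter can carry from the low into the high word between the two 32-bit accesses, so the high word is sampled before and after the low word and the read repeats until both samples agree. A standalone sketch of the pattern, with read_hi()/read_lo() as hypothetical stand-ins for ioread32():

#include <stdint.h>

static uint64_t read64_consistent(uint32_t (*read_hi)(void),
				  uint32_t (*read_lo)(void))
{
	uint32_t hi, lo, tmp;

	do {
		hi = read_hi();
		lo = read_lo();
		tmp = read_hi();
	} while (hi != tmp);

	return (uint64_t)hi << 32 | lo;
}
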
@@ -485,6 +537,7 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd64(hw, off) _enetc_rd_reg64_wa((hw)->port + (off))
#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
(hw)->port + (off), val)
@@ -507,11 +560,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -522,29 +587,35 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
};
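
Note that BIT(1) and BIT(3) are overloaded by hardware revision rather than reserved: the same bit means TSE on ENETC 1.0 and LSO on 4.1 and later. A compile-time reminder of the aliasing (illustrative):

static_assert(ENETC_TXBD_FLAGS_TSE == ENETC_TXBD_FLAGS_LSO,
	      "BIT(1) is TSE on ENETC 1.0, LSO on 4.1 and later");
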
+#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_TSTAMP GENMASK(29, 0)
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
@@ -606,6 +677,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
@@ -881,7 +954,7 @@ struct sgcl_data {
u32 bth;
u32 ct;
u32 cte;
- struct sgce sgcl[0];
+ struct sgce sgcl[];
};
#define ENETC_CBDR_FMI_MR BIT(0)
@@ -930,25 +1003,31 @@ struct enetc_cbd {
u8 status_flags;
};
-#define ENETC_CLK 400000000ULL
-static inline u32 enetc_cycles_to_usecs(u32 cycles)
+#define ENETC_CLK_400M 400000000ULL
+#define ENETC_CLK_333M 333000000ULL
+
+static inline u32 enetc_cycles_to_usecs(u32 cycles, u64 clk_freq)
{
- return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+ return (u32)div_u64(cycles * 1000000ULL, clk_freq);
}
-static inline u32 enetc_usecs_to_cycles(u32 usecs)
+static inline u32 enetc_usecs_to_cycles(u32 usecs, u64 clk_freq)
{
- return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+ return (u32)div_u64(usecs * clk_freq, 1000000ULL);
}
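
The conversion is plain unit arithmetic, cycles = usecs * clk_freq / 10^6: 100 us of coalescing maps to 40000 cycles at ENETC_CLK_400M and 33300 at ENETC_CLK_333M. A user-space restatement (illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t usecs_to_cycles(uint32_t usecs, uint64_t clk_freq)
{
	return (uint32_t)(usecs * clk_freq / 1000000ULL);
}

int main(void)
{
	printf("%u %u\n", usecs_to_cycles(100, 400000000ULL),
	       usecs_to_cycles(100, 333000000ULL)); /* 40000 33300 */
	return 0;
}
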
+/* Port traffic class frame preemption register */
+#define ENETC_PTCFPR(n) (0x1910 + (n) * 4) /* n = [0 ..7] */
+#define ENETC_PTCFPR_FPE BIT(31)
+
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index 91f02c505028..d39617ab9306 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -18,8 +18,8 @@
*/
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include "enetc.h"
@@ -127,11 +127,6 @@ static int enetc_ierb_probe(struct platform_device *pdev)
return 0;
}
-static int enetc_ierb_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id enetc_ierb_match[] = {
{ .compatible = "fsl,ls1028a-enetc-ierb", },
{},
@@ -144,7 +139,6 @@ static struct platform_driver enetc_ierb_driver = {
.of_match_table = enetc_ierb_match,
},
.probe = enetc_ierb_probe,
- .remove = enetc_ierb_remove,
};
module_platform_driver(enetc_ierb_driver);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 70e6d97b380f..998aaa394e9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -55,7 +55,8 @@ static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
is_busy, !is_busy, 10, 10 * 1000);
}
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -63,14 +64,39 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- /* clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
- }
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* write the value */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c22);
+
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
@@ -83,13 +109,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* write the value */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
@@ -100,9 +124,9 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
return 0;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_write);
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c45);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -110,14 +134,51 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* initiate the read */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
}
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c22);
+
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 value;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
+
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
@@ -129,13 +190,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* initiate the read */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
@@ -147,7 +206,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* return all Fs if nothing was there */
if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
@@ -156,7 +215,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return value;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_read);
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c45);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index 15f37c5b8dc1..e108cac8288d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -4,11 +4,35 @@
#include <linux/of_mdio.h>
#include "enetc_pf.h"
+#define NETC_EMDIO_VEN_ID 0x1131
+#define NETC_EMDIO_DEV_ID 0xee00
#define ENETC_MDIO_DEV_ID 0xee01
#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+DEFINE_STATIC_KEY_FALSE(enetc_has_err050089);
+EXPORT_SYMBOL_GPL(enetc_has_err050089);
+
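+/* ERR050089 (LS1028A erratum): EMDIO transactions and other ENETC
+ * register accesses must not run concurrently. Reference-count the
+ * static key so the workaround is active only while an affected
+ * central MDIO function is bound.
+ */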
+static void enetc_emdio_enable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_inc(&enetc_has_err050089);
+ dev_info(&pdev->dev, "Enabled ERR050089 workaround\n");
+ }
+}
+
+static void enetc_emdio_disable_err050089(struct pci_dev *pdev)
+{
+ if (pdev->vendor == PCI_VENDOR_ID_FREESCALE &&
+ pdev->device == ENETC_MDIO_DEV_ID) {
+ static_branch_dec(&enetc_has_err050089);
+ if (!static_key_enabled(&enetc_has_err050089.key))
+ dev_info(&pdev->dev, "Disabled ERR050089 workaround\n");
+ }
+}
+
static int enetc_pci_mdio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -39,8 +63,10 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
}
bus->name = ENETC_MDIO_BUS_NAME;
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -60,6 +86,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
goto err_pci_mem_reg;
}
+ enetc_emdio_enable_err050089(pdev);
+
err = of_mdiobus_register(bus, dev->of_node);
if (err)
goto err_mdiobus_reg;
@@ -69,7 +97,8 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- pci_release_mem_regions(pdev);
+ enetc_emdio_disable_err050089(pdev);
+ pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
@@ -86,14 +115,18 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
struct enetc_mdio_priv *mdio_priv;
mdiobus_unregister(bus);
+
+ enetc_emdio_disable_err050089(pdev);
+
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
}
static const struct pci_device_id enetc_pci_mdio_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { PCI_DEVICE(NETC_EMDIO_VEN_ID, NETC_EMDIO_DEV_ID) },
{ 0, } /* End of table. */
};
MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 0e87c7043b77..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
-#include <asm/unaligned.h>
-#include <linux/mdio.h>
+#include <linux/unaligned.h>
#include <linux/module.h>
-#include <linux/fsl/enetc_mdio.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
-#include "enetc_pf.h"
+#include "enetc_pf_common.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -32,18 +31,15 @@ static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
}
-static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+static struct phylink_pcs *enetc_pf_create_pcs(struct enetc_pf *pf,
+ struct mii_bus *bus)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct sockaddr *saddr = addr;
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(ndev, saddr->sa_data);
- enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
+ return lynx_pcs_create_mdiodev(bus, 0);
+}
- return 0;
+static void enetc_pf_destroy_pcs(struct phylink_pcs *pcs)
+{
+ lynx_pcs_destroy(pcs);
}
static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map)
@@ -76,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -108,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -254,88 +216,26 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
u32 reg;
- reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (reg & ENETC_PM0_IFM_RG) {
/* RGMII mode */
reg = (reg & ~ENETC_PM0_IFM_RLP) |
(en ? ENETC_PM0_IFM_RLP : 0);
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg);
} else {
/* assume SGMII mode */
- reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
reg = (reg & ~ENETC_PM0_CMD_XGLP) |
(en ? ENETC_PM0_CMD_XGLP : 0);
reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
(en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg);
}
}
@@ -393,56 +293,6 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
- int si)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_hw *hw = &pf->si->hw;
- u8 mac_addr[ETH_ALEN] = { 0 };
- int err;
-
- /* (1) try to get the MAC address from the device tree */
- if (np) {
- err = of_get_mac_address(np, mac_addr);
- if (err == -EPROBE_DEFER)
- return err;
- }
-
- /* (2) bootloader supplied MAC address */
- if (is_zero_ether_addr(mac_addr))
- enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
-
- /* (3) choose a random one */
- if (is_zero_ether_addr(mac_addr)) {
- eth_random_addr(mac_addr);
- dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
- si, mac_addr);
- }
-
- enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
-
- return 0;
-}
-
-static int enetc_setup_mac_addresses(struct device_node *np,
- struct enetc_pf *pf)
-{
- int err, i;
-
- /* The PF might take its MAC from the device tree */
- err = enetc_setup_mac_address(np, pf, 0);
- if (err)
- return err;
-
- for (i = 0; i < pf->total_vfs; i++) {
- err = enetc_setup_mac_address(NULL, pf, i + 1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -464,6 +314,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE);
}
+static void enetc_port_get_caps(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PCAPR0);
+
+ if (val & ENETC_PCAPR0_QBV)
+ si->hw_features |= ENETC_SI_F_QBV;
+
+ if (val & ENETC_PCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
+ if (val & ENETC_PCAPR0_PSFP)
+ si->hw_features |= ENETC_SI_F_PSFP;
+}
+
static void enetc_port_si_configure(struct enetc_si *si)
{
struct enetc_pf *pf = enetc_si_priv(si);
@@ -471,6 +338,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
int num_rings, i;
u32 val;
+ enetc_port_get_caps(si);
+
val = enetc_port_rd(hw, ENETC_PCAPR0);
num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val));
@@ -515,84 +384,84 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
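+	/* Convert each per-TC max SDU to a max frame size by adding the
+	 * VLAN Ethernet header; a max SDU of 0 means unrestricted, i.e.
+	 * the MAC max frame size.
+	 */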
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+
+ enetc_port_mac_wr(si, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_reset_ptcmsdur(hw);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
* and may lead to RX lock-up under traffic. Set it to 1 instead,
* as recommended by the hardware team.
*/
- enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}
-static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode)
{
u32 val;
if (phy_interface_mode_is_rgmii(phy_mode)) {
- val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
}
-static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+static void enetc_mac_enable(struct enetc_si *si, bool en)
{
- u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
-}
-
-static void enetc_configure_port_pmac(struct enetc_hw *hw)
-{
- u32 temp;
-
- /* Set pMAC step lock */
- temp = enetc_port_rd(hw, ENETC_PFPMR);
- enetc_port_wr(hw, ENETC_PFPMR,
- temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
-
- temp = enetc_port_rd(hw, ENETC_MMCSR);
- enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val);
}
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
- enetc_configure_port_pmac(hw);
-
- enetc_configure_port_mac(hw);
+ enetc_configure_port_mac(pf->si);
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -663,19 +532,11 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
enetc_msg_psi_free(pf);
- kfree(pf->vf_state);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
} else {
pf->num_vfs = num_vfs;
- pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
- GFP_KERNEL);
- if (!pf->vf_state) {
- pf->num_vfs = 0;
- return -ENOMEM;
- }
-
err = enetc_msg_psi_init(pf);
if (err) {
dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err);
@@ -694,7 +555,6 @@ static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
err_en_sriov:
enetc_msg_psi_free(pf);
err_msg_psi:
- kfree(pf->vf_state);
pf->num_vfs = 0;
return err;
@@ -708,6 +568,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
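+	/* NETIF_F_HW_TC toggles the PSFP (802.1Qci per-stream filtering
+	 * and policing) offload
+	 */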
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -721,7 +588,30 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -738,235 +628,19 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
-static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
- const struct net_device_ops *ndev_ops)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
-
- SET_NETDEV_DEV(ndev, &si->pdev->dev);
- priv->ndev = ndev;
- priv->si = si;
- priv->dev = &si->pdev->dev;
- si->ndev = ndev;
-
- priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
- ndev->netdev_ops = ndev_ops;
- enetc_set_ethtool_ops(ndev);
- ndev->watchdog_timeo = 5 * HZ;
- ndev->max_mtu = ENETC_MAX_MTU;
-
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
- ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
-
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
-
- ndev->priv_flags |= IFF_UNICAST_FLT;
-
- if (si->hw_features & ENETC_SI_F_QBV)
- priv->active_offloads |= ENETC_F_QBV;
-
- if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
- priv->active_offloads |= ENETC_F_QCI;
- ndev->features |= NETIF_F_HW_TC;
- ndev->hw_features |= NETIF_F_HW_TC;
- }
-
- /* pick up primary MAC address from SI */
- enetc_load_primary_mac_addr(&si->hw, ndev);
-}
-
-static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct mii_bus *bus;
- int err;
-
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
- bus->parent = dev;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
-
- err = of_mdiobus_register(bus, np);
- if (err)
- return dev_err_probe(dev, err, "cannot register MDIO bus\n");
-
- pf->mdio = bus;
-
- return 0;
-}
-
-static void enetc_mdio_remove(struct enetc_pf *pf)
-{
- if (pf->mdio)
- mdiobus_unregister(pf->mdio);
-}
-
-static int enetc_imdio_create(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_priv *mdio_priv;
- struct lynx_pcs *pcs_lynx;
- struct mdio_device *pcs;
- struct mii_bus *bus;
- int err;
-
- bus = mdiobus_alloc_size(sizeof(*mdio_priv));
- if (!bus)
- return -ENOMEM;
-
- bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
- bus->parent = dev;
- bus->phy_mask = ~0;
- mdio_priv = bus->priv;
- mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
-
- err = mdiobus_register(bus);
- if (err) {
- dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
- goto free_mdio_bus;
- }
-
- pcs = mdio_device_create(bus, 0);
- if (IS_ERR(pcs)) {
- err = PTR_ERR(pcs);
- dev_err(dev, "cannot create pcs (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- pcs_lynx = lynx_pcs_create(pcs);
- if (!pcs_lynx) {
- mdio_device_free(pcs);
- err = -ENOMEM;
- dev_err(dev, "cannot create lynx pcs (%d)\n", err);
- goto unregister_mdiobus;
- }
-
- pf->imdio = bus;
- pf->pcs = pcs_lynx;
-
- return 0;
-
-unregister_mdiobus:
- mdiobus_unregister(bus);
-free_mdio_bus:
- mdiobus_free(bus);
- return err;
-}
-
-static void enetc_imdio_remove(struct enetc_pf *pf)
-{
- if (pf->pcs) {
- mdio_device_free(pf->pcs->mdio);
- lynx_pcs_destroy(pf->pcs);
- }
- if (pf->imdio) {
- mdiobus_unregister(pf->imdio);
- mdiobus_free(pf->imdio);
- }
-}
-
-static bool enetc_port_has_pcs(struct enetc_pf *pf)
-{
- return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
- pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
- pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
-}
-
-static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- struct device_node *mdio_np;
- int err;
-
- mdio_np = of_get_child_by_name(node, "mdio");
- if (mdio_np) {
- err = enetc_mdio_probe(pf, mdio_np);
-
- of_node_put(mdio_np);
- if (err)
- return err;
- }
-
- if (enetc_port_has_pcs(pf)) {
- err = enetc_imdio_create(pf);
- if (err) {
- enetc_mdio_remove(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-static void enetc_mdiobus_destroy(struct enetc_pf *pf)
-{
- enetc_mdio_remove(pf);
- enetc_imdio_remove(pf);
-}
-
-static void enetc_pl_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
- state->interface != PHY_INTERFACE_MODE_USXGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return pf->pcs;
}
static void enetc_pl_mac_config(struct phylink_config *config,
@@ -974,20 +648,15 @@ static void enetc_pl_mac_config(struct phylink_config *config,
const struct phylink_link_state *state)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- struct enetc_ndev_priv *priv;
- enetc_mac_config(&pf->si->hw, state->interface);
-
- priv = netdev_priv(pf->si->ndev);
- if (pf->pcs)
- phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
+ enetc_mac_config(pf->si, state->interface);
}
-static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
+static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex)
{
u32 old_val, val;
- old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (speed == SPEED_1000) {
val &= ~ENETC_PM0_IFM_SSP_MASK;
@@ -1008,7 +677,7 @@ static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
if (val == old_val)
return;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
static void enetc_pl_mac_link_up(struct phylink_config *config,
@@ -1020,17 +689,19 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
u32 pause_off_thresh = 0, pause_on_thresh = 0;
u32 init_quanta = 0, refresh_quanta = 0;
struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_si *si = pf->si;
struct enetc_ndev_priv *priv;
u32 rbmr, cmd_cfg;
int idx;
priv = netdev_priv(pf->si->ndev);
- if (priv->active_offloads & ENETC_F_QBV)
+
+ if (pf->si->hw_features & ENETC_SI_F_QBV)
enetc_sched_speed_set(priv, speed);
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(hw, speed, duplex);
+ enetc_force_rgmii_mac(si, speed, duplex);
/* Flow control */
for (idx = 0; idx < priv->num_rx_rings; idx++) {
@@ -1066,24 +737,24 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
}
- enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
- cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
if (rx_pause)
cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
else
cmd_cfg |= ENETC_PM0_PAUSE_IGN;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(si, true);
- enetc_mac_enable(hw, true);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1091,45 +762,24 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, false);
- enetc_mac_enable(&pf->si->hw, false);
+ enetc_mac_enable(si, false);
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = enetc_pl_mac_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
};
-static int enetc_phylink_create(struct enetc_ndev_priv *priv,
- struct device_node *node)
-{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- struct phylink *phylink;
- int err;
-
- pf->phylink_config.dev = &priv->ndev->dev;
- pf->phylink_config.type = PHYLINK_NETDEV;
-
- phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
- pf->if_mode, &enetc_mac_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
- return err;
- }
-
- priv->phylink = phylink;
-
- return 0;
-}
-
-static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
-{
- if (priv->phylink)
- phylink_destroy(priv->phylink);
-}
-
/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
@@ -1177,18 +827,19 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
-
- /* Don't register with the IERB if the PF itself is disabled */
- if (!node || !of_device_is_available(node))
- return 0;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
+ return -ENODEV;
+
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
return -ENODEV;
+ }
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
@@ -1196,64 +847,122 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
-static int enetc_pf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
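+/* Bring up the port SI: PCI device, control BD ring and the port
+ * RFS/RSS memories. Shared by the probe path and the PCI quirk below.
+ */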
+static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- struct enetc_ndev_priv *priv;
- struct net_device *ndev;
struct enetc_si *si;
- struct enetc_pf *pf;
int err;
- err = enetc_pf_register_with_ierb(pdev);
- if (err == -EPROBE_DEFER)
- return err;
- if (err)
- dev_warn(&pdev->dev,
- "Could not register with IERB driver: %pe, please update the device tree\n",
- ERR_PTR(err));
-
- err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
- if (err)
- return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(struct enetc_pf));
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+ goto out;
+ }
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
err = -ENODEV;
dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
- goto err_map_pf_space;
+ goto out_pci_remove;
+ }
+
+ si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err(&pdev->dev, "Could not get PF driver data\n");
+ goto out_pci_remove;
}
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
- goto err_setup_cbdr;
+ goto out_pci_remove;
err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
- goto err_init_port_rfs;
+ goto out_teardown_cbdr;
}
err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
- goto err_init_port_rss;
+ goto out_teardown_cbdr;
}
- if (node && !of_device_is_available(node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- err = -ENODEV;
- goto err_device_disabled;
+ return si;
+
+out_teardown_cbdr:
+ enetc_teardown_cbdr(&si->cbd_ring);
+out_pci_remove:
+ enetc_pci_remove(pdev);
+out:
+ return ERR_PTR(err);
+}
+
+static void enetc_psi_destroy(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+
+ enetc_teardown_cbdr(&si->cbd_ring);
+ enetc_pci_remove(pdev);
+}
+
+static const struct enetc_pf_ops enetc_pf_ops = {
+ .set_si_primary_mac = enetc_pf_set_primary_mac_addr,
+ .get_si_primary_mac = enetc_pf_get_primary_mac_addr,
+ .create_pcs = enetc_pf_create_pcs,
+ .destroy_pcs = enetc_pf_destroy_pcs,
+ .enable_psfp = enetc_psfp_enable,
+};
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pf_register_with_ierb(pdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ dev_warn(&pdev->dev,
+ "Could not register with IERB driver: %pe, please update the device tree\n",
+ ERR_PTR(err));
+
+ si = enetc_psi_create(pdev);
+ if (IS_ERR(si)) {
+ err = PTR_ERR(si);
+ goto err_psi_create;
}
pf = enetc_si_priv(si);
pf->si = si;
+ pf->ops = &enetc_pf_ops;
+
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ if (pf->total_vfs) {
+ pf->vf_state = kcalloc(pf->total_vfs, sizeof(struct enetc_vf_state),
+ GFP_KERNEL);
+		if (!pf->vf_state) {
+			err = -ENOMEM;
+			goto err_alloc_vf_state;
+		}
+ }
err = enetc_setup_mac_addresses(node, pf);
if (err)
@@ -1274,6 +983,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
priv = netdev_priv(ndev);
+ mutex_init(&priv->mm_lock);
+
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
@@ -1294,16 +1005,20 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- if (!of_get_phy_mode(node, &pf->if_mode)) {
- err = enetc_mdiobus_create(pf, node);
- if (err)
- goto err_mdiobus_create;
-
- err = enetc_phylink_create(priv, node);
- if (err)
- goto err_phylink_create;
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to read PHY mode\n");
+ goto err_phy_mode;
}
+ err = enetc_mdiobus_create(pf, node);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv, node, &enetc_mac_phylink_ops);
+ if (err)
+ goto err_phylink_create;
+
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
@@ -1315,6 +1030,7 @@ err_reg_netdev:
err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
+err_phy_mode:
enetc_free_msix(priv);
err_config_si:
err_alloc_msix:
@@ -1323,15 +1039,11 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
-err_init_port_rss:
-err_init_port_rfs:
-err_device_disabled:
err_setup_mac_addresses:
- enetc_teardown_cbdr(&si->cbd_ring);
-err_setup_cbdr:
-err_map_pf_space:
- enetc_pci_remove(pdev);
-
+ kfree(pf->vf_state);
+err_alloc_vf_state:
+ enetc_psi_destroy(pdev);
+err_psi_create:
return err;
}
@@ -1354,12 +1066,30 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
- enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
+ kfree(pf->vf_state);
- enetc_pci_remove(pdev);
+ enetc_psi_destroy(pdev);
+}
+
+static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct enetc_si *si;
+
+	/* Only apply the quirk to disabled functions. For the ones
+	 * that are enabled, enetc_pf_probe() will apply it.
+	 */
+ if (node && of_device_is_available(node))
+ return;
+
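+	/* Creating the PSI initializes (and thus clears) the port RFS/RSS
+	 * memories; destroying it right away is all this quirk needs.
+	 */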
+ si = enetc_psi_create(pdev);
+ if (!IS_ERR(si))
+ enetc_psi_destroy(pdev);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
+ enetc_fixup_clear_rss_rfs);
static const struct pci_device_id enetc_pf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 263946c51e37..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,22 +2,11 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
-#include <linux/pcs-lynx.h>
+#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -28,6 +17,25 @@ struct enetc_vf_state {
enum enetc_vf_flags flags;
};
+struct enetc_port_caps {
+ u32 half_duplex:1;
+ int num_vsi;
+ int num_msix;
+ int num_rx_bdr;
+ int num_tx_bdr;
+ int mac_filter_num;
+};
+
+struct enetc_pf;
+
+struct enetc_pf_ops {
+ void (*set_si_primary_mac)(struct enetc_hw *hw, int si, const u8 *addr);
+ void (*get_si_primary_mac)(struct enetc_hw *hw, int si, u8 *addr);
+ struct phylink_pcs *(*create_pcs)(struct enetc_pf *pf, struct mii_bus *bus);
+ void (*destroy_pcs)(struct phylink_pcs *pcs);
+ int (*enable_psfp)(struct enetc_ndev_priv *priv);
+};
+
struct enetc_pf {
struct enetc_si *si;
int num_vfs; /* number of active VFs, after sriov_init */
@@ -46,10 +54,15 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
phy_interface_t if_mode;
struct phylink_config phylink_config;
+
+ struct enetc_port_caps caps;
+ const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of mac address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
new file mode 100644
index 000000000000..76263b8566bb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2024 NXP */
+
+#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "enetc_pf_common.h"
+
+static void enetc_set_si_hw_addr(struct enetc_pf *pf, int si,
+ const u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->set_si_primary_mac(hw, si, mac_addr);
+}
+
+static void enetc_get_si_hw_addr(struct enetc_pf *pf, int si, u8 *mac_addr)
+{
+ struct enetc_hw *hw = &pf->si->hw;
+
+ pf->ops->get_si_primary_mac(hw, si, mac_addr);
+}
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+ enetc_set_si_hw_addr(pf, 0, saddr->sa_data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_pf_set_mac_addr);
+
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
+
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_get_si_hw_addr(pf, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
+ eth_random_addr(mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_set_si_hw_addr(pf, si, mac_addr);
+
+ return 0;
+}
+
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_setup_mac_addresses);
+
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+
+ SET_NETDEV_DEV(ndev, &si->pdev->dev);
+ priv->ndev = ndev;
+ priv->si = si;
+ priv->dev = &si->pdev->dev;
+ si->ndev = ndev;
+
+ priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
+ ndev->netdev_ops = ndev_ops;
+ enetc_set_ethtool_ops(ndev);
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->max_mtu = ENETC_MAX_MTU;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
+ if (si->num_rss) {
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
+
+ if (!enetc_is_pseudo_mac(si))
+ ndev->hw_features |= NETIF_F_LOOPBACK;
+
+	/* TODO: currently, the i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
+
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ if (si->hw_features & ENETC_SI_F_PSFP && pf->ops->enable_psfp &&
+ !pf->ops->enable_psfp(priv)) {
+ priv->active_offloads |= ENETC_F_QCI;
+ ndev->features |= NETIF_F_HW_TC;
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+end:
+ /* pick up primary MAC address from SI */
+ enetc_load_primary_mac_addr(&si->hw, ndev);
+}
+EXPORT_SYMBOL_GPL(enetc_pf_netdev_setup);
+
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
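+	/* ENETC v1 (LS1028A) and v4 (i.MX95) map the EMDIO registers at
+	 * different port offsets
+	 */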
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_EMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ pf->mdio = bus;
+
+ return 0;
+}
+
+static void enetc_mdio_remove(struct enetc_pf *pf)
+{
+ if (pf->mdio)
+ mdiobus_unregister(pf->mdio);
+}
+
+static int enetc_imdio_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_mdio_priv *mdio_priv;
+ struct phylink_pcs *phylink_pcs;
+ struct mii_bus *bus;
+ int err;
+
+ if (!pf->ops->create_pcs) {
+ dev_err(dev, "Creating PCS is not supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale ENETC internal MDIO Bus";
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
+ bus->parent = dev;
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+ goto free_mdio_bus;
+ }
+
+ phylink_pcs = pf->ops->create_pcs(pf, bus);
+ if (IS_ERR(phylink_pcs)) {
+ err = PTR_ERR(phylink_pcs);
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pf->imdio = bus;
+ pf->pcs = phylink_pcs;
+
+ return 0;
+
+unregister_mdiobus:
+ mdiobus_unregister(bus);
+free_mdio_bus:
+ mdiobus_free(bus);
+ return err;
+}
+
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+ if (pf->pcs && pf->ops->destroy_pcs)
+ pf->ops->destroy_pcs(pf->pcs);
+
+ if (pf->imdio) {
+ mdiobus_unregister(pf->imdio);
+ mdiobus_free(pf->imdio);
+ }
+}
+
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node)
+{
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_create);
+
+void enetc_mdiobus_destroy(struct enetc_pf *pf)
+{
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
+EXPORT_SYMBOL_GPL(enetc_mdiobus_destroy);
+
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops)
+{
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct phylink *phylink;
+ int err;
+
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
+ pf->if_mode, ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ return err;
+ }
+
+ priv->phylink = phylink;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_create);
+
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
+EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
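+/* Fold the 12-bit VID into a 64-entry hash table index:
+ * bit i of the index is vid[i] ^ vid[i + 6].
+ */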
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
+MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
new file mode 100644
index 000000000000..96d4840a3107
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP */
+
+#include "enetc_pf.h"
+
+int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr);
+int enetc_setup_mac_addresses(struct device_node *np, struct enetc_pf *pf);
+void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
+ const struct net_device_ops *ndev_ops);
+int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node);
+void enetc_mdiobus_destroy(struct enetc_pf *pf);
+int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
+ const struct phylink_mac_ops *ops);
+void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
+
+static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
+{
+ return enetc_global_rd(hw, ENETC_G_EIPBRR0) & EIPBRR0_REVISION;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 36b4f51dd297..b8413d3b4f16 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,9 +7,6 @@
#include "enetc.h"
-int enetc_phc_index = -1;
-EXPORT_SYMBOL(enetc_phc_index);
-
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -42,15 +39,10 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
return dev_err_probe(&pdev->dev, err, "device enable failed\n");
- /* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
@@ -97,7 +89,6 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
- enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -123,7 +114,6 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
- enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0536d2c76fbc..ccf86651455c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -11,14 +11,14 @@
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,38 +39,33 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
-static int enetc_setup_taprio(struct net_device *ndev,
+static int enetc_setup_taprio(struct enetc_ndev_priv *priv,
struct tc_taprio_qopt_offload *admin_conf)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
- struct gce *gce;
dma_addr_t dma;
+ struct gce *gce;
u16 data_size;
u16 gcl_len;
+ void *tmp;
u32 tge;
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
- return -EINVAL;
- gcl_len = admin_conf->num_entries;
+ /* TSD and Qbv are mutually exclusive in hardware */
+ for (i = 0; i < priv->num_tx_rings; i++)
+ if (priv->tx_ring[i]->tsd_enable)
+ return -EBUSY;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
- if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
- return 0;
- }
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
+ return -EINVAL;
if (admin_conf->cycle_time > U32_MAX ||
admin_conf->cycle_time_extension > U32_MAX)
@@ -80,10 +75,12 @@ static int enetc_setup_taprio(struct net_device *ndev,
* control BD descriptor.
*/
gcl_config = &cbd.gcl_conf;
+ gcl_len = admin_conf->num_entries;
data_size = struct_size(gcl_data, entry, gcl_len);
- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!gcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
return -ENOMEM;
gce = (struct gce *)(gcl_data + 1);
@@ -107,61 +104,119 @@ static int enetc_setup_taprio(struct net_device *ndev,
temp_gce->period = cpu_to_le32(temp_entry->interval);
}
- cbd.length = cpu_to_le16(data_size);
cbd.status_flags = 0;
- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
- data_size, DMA_TO_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(gcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ tge = enetc_rd(hw, ENETC_PTGCR);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
- kfree(gcl_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- return err;
+ if (err)
+ return err;
+
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
-int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+static void enetc_reset_taprio_stats(struct enetc_ndev_priv *priv)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
int i;
- /* TSD and Qbv are mutually exclusive in hardware */
for (i = 0; i < priv->num_tx_rings; i++)
- if (priv->tx_ring[i]->tsd_enable)
- return -EBUSY;
+ priv->tx_ring[i]->stats.win_drop = 0;
+}
+
+static void enetc_reset_taprio(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ val = enetc_rd(hw, ENETC_PTGCR);
+ enetc_wr(hw, ENETC_PTGCR, val & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
+
+ priv->active_offloads &= ~ENETC_F_QBV;
+}
+
+static void enetc_taprio_destroy(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ enetc_reset_taprio(priv);
+ enetc_reset_tc_mqprio(ndev);
+ enetc_reset_taprio_stats(priv);
+}
+
+static void enetc_taprio_stats(struct net_device *ndev,
+ struct tc_taprio_qopt_stats *stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u64 window_drops = 0;
+ int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? i : 0);
+ window_drops += priv->tx_ring[i]->stats.win_drop;
+
+ stats->window_drops = window_drops;
+}
+
+static void enetc_taprio_queue_stats(struct net_device *ndev,
+ struct tc_taprio_qopt_queue_stats *queue_stats)
+{
+ struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int queue = queue_stats->queue;
+
+ stats->window_drops = priv->tx_ring[queue]->stats.win_drop;
+}
+
+static int enetc_taprio_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *offload)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
- err = enetc_setup_taprio(ndev, taprio);
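+	/* Program the mqprio mapping first; roll it back if the gate
+	 * control list setup fails.
+	 */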
+ err = enetc_setup_tc_mqprio(ndev, &offload->mqprio);
+ if (err)
+ return err;
+ err = enetc_setup_taprio(priv, offload);
if (err)
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? 0 : i);
+ enetc_reset_tc_mqprio(ndev);
+
+ return err;
+}
+
+int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_taprio_qopt_offload *offload = type_data;
+ int err = 0;
+
+ switch (offload->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = enetc_taprio_replace(ndev, offload);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ enetc_taprio_destroy(ndev);
+ break;
+ case TAPRIO_CMD_STATS:
+ enetc_taprio_stats(ndev, &offload->stats);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ enetc_taprio_queue_stats(ndev, &offload->queue_stats);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
return err;
}
@@ -182,7 +237,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -191,8 +246,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
int bw_sum = 0;
u8 bw;
- prio_top = netdev_get_prio_tc_map(ndev, tc_nums - 1);
- prio_next = netdev_get_prio_tc_map(ndev, tc_nums - 2);
+ prio_top = tc_nums - 1;
+ prio_next = tc_nums - 2;
/* Support highest prio and second prio tc in cbs mode */
if (tc != prio_top && tc != prio_next)
@@ -203,15 +258,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -228,13 +283,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -243,7 +298,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interference_size is maxSizedFrame.
*
@@ -263,8 +318,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -281,13 +336,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
*
* (enetClockFrequency / portTransmitRate) * 100
*/
- hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
+ hi_credit_reg = (u32)div_u64((priv->sysclk_freq * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
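+ /* For example, with a (hypothetical) 400 MHz system clock and a
+ * 1000 Mbps link, this works out to hi_credit_reg = 40 * hi_credit_bit.
+ */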
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -297,6 +352,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -307,17 +363,12 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
- /* Do not support TXSTART and TX CSUM offload simutaniously */
- if (ndev->features & NETIF_F_CSUM_MASK)
- return -EBUSY;
-
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -392,7 +443,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
/* Only enable the green color frame now
@@ -432,13 +483,13 @@ struct enetc_psfp {
static struct actions_fwd enetc_act_fwd[] = {
{
BIT(FLOW_ACTION_GATE),
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
FILTER_ACTION_TYPE_PSFP
},
{
BIT(FLOW_ACTION_POLICE) |
BIT(FLOW_ACTION_GATE),
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
FILTER_ACTION_TYPE_PSFP
},
/* example for ACL actions */
@@ -450,6 +501,7 @@ static struct actions_fwd enetc_act_fwd[] = {
};
static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
.psfp_sfi_bitmap = NULL,
};
@@ -463,8 +515,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_cbd cbd = {.cmd = 0};
struct streamid_data *si_data;
struct streamid_conf *si_conf;
- u16 data_size;
dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
int port;
int err;
@@ -485,21 +538,11 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct streamid_data);
- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!si_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev, si_data,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto out;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -520,11 +563,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
goto out;
/* Enable the entry overwrite again in case the space was flushed by hardware */
- memset(&cbd, 0, sizeof(cbd));
-
- cbd.index = cpu_to_le16((u16)sid->index);
- cbd.cmd = 0;
- cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
cbd.status_flags = 0;
si_conf->en = 0x80;
@@ -537,11 +575,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
memset(si_data, 0, data_size);
- cbd.length = cpu_to_le16(data_size);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
* any VID is considered a match. VIDM setting is only used
@@ -561,10 +594,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
out:
- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
-
- kfree(si_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -635,6 +665,7 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
struct sfi_counter_data *data_buf;
dma_addr_t dma;
u16 data_size;
+ void *tmp;
int err;
cbd.index = cpu_to_le16((u16)index);
@@ -643,21 +674,11 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct sfi_counter_data);
- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
-
- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto exit;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
- cbd.length = cpu_to_le16(data_size);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
err = enetc_send_cmd(priv->si, &cbd);
if (err)
@@ -684,7 +705,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
data_buf->flow_meter_dropl;
exit:
- kfree(data_buf);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
return err;
}
@@ -726,6 +748,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
dma_addr_t dma;
u16 data_size;
int err, i;
+ void *tmp;
u64 now;
cbd.index = cpu_to_le16(sgi->index);
@@ -772,25 +795,11 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
-
- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!sgcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev,
- sgcl_data, data_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(sgcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
sgce = &sgcl_data->sgcl[0];
sgcl_config->agtst = 0x80;
@@ -844,8 +853,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
exit:
- kfree(sgcl_data);
-
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -1061,8 +1069,8 @@ revert_sid:
return err;
}
-static struct actions_fwd *enetc_check_flow_actions(u64 acts,
- unsigned int inputkeys)
+static struct actions_fwd *
+enetc_check_flow_actions(u64 acts, unsigned long long inputkeys)
{
int i;
@@ -1074,6 +1082,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
return NULL;
}
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
struct flow_cls_offload *f)
{
@@ -1182,7 +1230,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
/* parsing gate action */
- if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
err = -ENOSPC;
goto free_filter;
@@ -1202,7 +1250,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
refcount_set(&sgi->refcount, 1);
- sgi->index = entryg->gate.index;
+ sgi->index = entryg->hw_index;
sgi->init_ipv = entryg->gate.prio;
sgi->basetime = entryg->gate.basetime;
sgi->cycletime = entryg->gate.cycletime;
@@ -1230,11 +1278,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
- if (entryp->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- err = -EOPNOTSUPP;
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
goto free_sfi;
- }
+
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
@@ -1244,7 +1291,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
refcount_set(&fmi->refcount, 1);
fmi->cir = entryp->police.rate_bytes_ps;
fmi->cbs = entryp->police.burst;
- fmi->index = entryp->police.index;
+ fmi->index = entryp->hw_index;
filter->flags |= ENETC_PSFP_FLAGS_FMI;
filter->fmi_index = fmi->index;
sfi->meter_id = fmi->index;
@@ -1265,7 +1312,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
int index;
index = enetc_get_free_index(priv);
- if (sfi->handle < 0) {
+ if (index < 0) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
err = -ENOSPC;
goto free_fmi;
@@ -1529,6 +1576,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
@@ -1590,3 +1660,30 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,17 +78,37 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
+ int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
+ if (err)
+ return err;
+
+ eth_hw_addr_set(ndev, saddr->sa_data);
+
+ return 0;
}
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +120,9 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -115,6 +137,8 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
si->ndev = ndev;
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -123,21 +147,30 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -151,6 +184,14 @@ static int enetc_vf_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
+ si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
+ err = enetc_get_driver_data(si);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "Could not get VF driver data\n");
+ goto err_alloc_netdev;
+ }
enetc_get_si_caps(si);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
new file mode 100644
index 000000000000..443983fdecd9
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC Blocks Control Driver
+ *
+ * Copyright 2024 NXP
+ *
+ * This driver is used for pre-initialization of NETC, such as PCS and MII
+ * protocols, LDID, warm reset, etc. Therefore, all NETC device drivers can
+ * only be probed after the netc-blk-ctrl driver has completed initialization.
+ * In addition, when the system enters suspend mode, IERB, PRB, and NETCMIX
+ * are powered off (unless Wake-on-LAN is in use), so these blocks need to
+ * be reinitialized when the system resumes.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* NETCMIX registers */
+#define IMX95_CFG_LINK_IO_VAR 0x0
+#define IO_VAR_16FF_16G_SERDES 0x1
+#define IO_VAR(port, var) (((var) & 0xf) << ((port) << 2))
+
+#define IMX95_CFG_LINK_MII_PROT 0x4
+#define CFG_LINK_MII_PORT_0 GENMASK(3, 0)
+#define CFG_LINK_MII_PORT_1 GENMASK(7, 4)
+#define MII_PROT_MII 0x0
+#define MII_PROT_RMII 0x1
+#define MII_PROT_RGMII 0x2
+#define MII_PROT_SERIAL 0x3
+#define MII_PROT(port, prot) (((prot) & 0xf) << ((port) << 2))
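+
+/* IO_VAR() and MII_PROT() each pack one 4-bit field per link into a single
+ * register; e.g. MII_PROT(1, MII_PROT_RGMII) evaluates to 0x20 (bits 7:4).
+ */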
+
+#define IMX95_CFG_LINK_PCS_PROT(a) (0x8 + (a) * 4)
+#define PCS_PROT_1G_SGMII BIT(0)
+#define PCS_PROT_2500M_SGMII BIT(1)
+#define PCS_PROT_XFI BIT(3)
+#define PCS_PROT_SFI BIT(4)
+#define PCS_PROT_10G_SXGMII BIT(6)
+
+#define IMX94_EXT_PIN_CONTROL 0x10
+#define MAC2_MAC3_SEL BIT(1)
+
+#define IMX94_NETC_LINK_CFG(a) (0x4c + (a) * 4)
+#define NETC_LINK_CFG_MII_PROT GENMASK(3, 0)
+#define NETC_LINK_CFG_IO_VAR GENMASK(19, 16)
+
+/* NETC privileged register block register */
+#define PRB_NETCRR 0x100
+#define NETCRR_SR BIT(0)
+#define NETCRR_LOCK BIT(1)
+
+#define PRB_NETCSR 0x104
+#define NETCSR_ERROR BIT(0)
+#define NETCSR_STATE BIT(1)
+
+/* NETC integrated endpoint register block register */
+#define IERB_EMDIOFAUXR 0x344
+#define IERB_T0FAUXR 0x444
+#define IERB_ETBCR(a) (0x300c + 0x100 * (a))
+#define IERB_LBCR(a) (0x1010 + 0x40 * (a))
+#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8)
+
+#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
+#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
+#define FAUXR_LDID GENMASK(3, 0)
+
+/* Platform information */
+#define IMX95_ENETC0_BUS_DEVFN 0x0
+#define IMX95_ENETC1_BUS_DEVFN 0x40
+#define IMX95_ENETC2_BUS_DEVFN 0x80
+
+#define IMX94_ENETC0_BUS_DEVFN 0x100
+#define IMX94_ENETC1_BUS_DEVFN 0x140
+#define IMX94_ENETC2_BUS_DEVFN 0x180
+#define IMX94_TIMER0_BUS_DEVFN 0x1
+#define IMX94_TIMER1_BUS_DEVFN 0x101
+#define IMX94_TIMER2_BUS_DEVFN 0x181
+#define IMX94_ENETC0_LINK 3
+#define IMX94_ENETC1_LINK 4
+#define IMX94_ENETC2_LINK 5
+
+#define NETC_ENETC_ID(a) (a)
+#define NETC_TIMER_ID(a) (a)
+
+/* Flags for different platforms */
+#define NETC_HAS_NETCMIX BIT(0)
+
+struct netc_devinfo {
+ u32 flags;
+ int (*netcmix_init)(struct platform_device *pdev);
+ int (*ierb_init)(struct platform_device *pdev);
+};
+
+struct netc_blk_ctrl {
+ void __iomem *prb;
+ void __iomem *ierb;
+ void __iomem *netcmix;
+
+ const struct netc_devinfo *devinfo;
+ struct platform_device *pdev;
+ struct dentry *debugfs_root;
+};
+
+static void netc_reg_write(void __iomem *base, u32 offset, u32 val)
+{
+ netc_write(base + offset, val);
+}
+
+static u32 netc_reg_read(void __iomem *base, u32 offset)
+{
+ return netc_read(base + offset);
+}
+
+static int netc_of_pci_get_bus_devfn(struct device_node *np)
+{
+ u32 reg[5];
+ int error;
+
+ error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
+ if (error)
+ return error;
+
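+ /* The first "reg" cell follows the standard PCI OF binding: bits 23:16
+ * hold the bus number, bits 15:8 the devfn (device 15:11, function 10:8).
+ */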
+ return (reg[0] >> 8) & 0xffff;
+}
+
+static int netc_get_link_mii_protocol(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return MII_PROT_MII;
+ case PHY_INTERFACE_MODE_RMII:
+ return MII_PROT_RMII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return MII_PROT_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return MII_PROT_SERIAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx95_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ phy_interface_t interface;
+ int bus_devfn, mii_proto;
+ u32 val;
+ int err;
+
+ /* Default setting of MII protocol */
+ val = MII_PROT(0, MII_PROT_RGMII) | MII_PROT(1, MII_PROT_RGMII) |
+ MII_PROT(2, MII_PROT_SERIAL);
+
+ /* Update the link MII protocol by parsing the phy-mode property */
+ for_each_available_child_of_node_scoped(np, child) {
+ for_each_available_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0)
+ return -EINVAL;
+
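+ /* ENETC2 uses the SerDes link; its MII protocol is fixed to the
+ * MII_PROT_SERIAL default set above.
+ */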
+ if (bus_devfn == IMX95_ENETC2_BUS_DEVFN)
+ continue;
+
+ err = of_get_phy_mode(gchild, &interface);
+ if (err)
+ continue;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return -EINVAL;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_0);
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ val = u32_replace_bits(val, mii_proto,
+ CFG_LINK_MII_PORT_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Configure Link I/O variant */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_IO_VAR,
+ IO_VAR(2, IO_VAR_16FF_16G_SERDES));
+ /* Configure Link 2 PCS protocol */
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_PCS_PROT(2),
+ PCS_PROT_10G_SXGMII);
+ netc_reg_write(priv->netcmix, IMX95_CFG_LINK_MII_PROT, val);
+
+ return 0;
+}
+
+static int imx94_enetc_get_link_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse ENETC link number */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return IMX94_ENETC0_LINK;
+ case IMX94_ENETC1_BUS_DEVFN:
+ return IMX94_ENETC1_LINK;
+ case IMX94_ENETC2_BUS_DEVFN:
+ return IMX94_ENETC2_LINK;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np, int link_id)
+{
+ phy_interface_t interface;
+ int mii_proto;
+ u32 val;
+
+ /* The node may be disabled, or it may lack a 'phy-mode' or
+ * 'phy-connection-type' property.
+ */
+ if (of_get_phy_mode(np, &interface))
+ return 0;
+
+ mii_proto = netc_get_link_mii_protocol(interface);
+ if (mii_proto < 0)
+ return mii_proto;
+
+ val = mii_proto & NETC_LINK_CFG_MII_PROT;
+ if (val == MII_PROT_SERIAL)
+ val = u32_replace_bits(val, IO_VAR_16FF_16G_SERDES,
+ NETC_LINK_CFG_IO_VAR);
+
+ netc_reg_write(priv->netcmix, IMX94_NETC_LINK_CFG(link_id), val);
+
+ return 0;
+}
+
+static int imx94_enetc_link_config(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ int link_id = imx94_enetc_get_link_id(np);
+
+ if (link_id < 0)
+ return link_id;
+
+ return imx94_link_config(priv, np, link_id);
+}
+
+static int imx94_netcmix_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+ int err;
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_link_config(priv, gchild);
+ if (err)
+ return err;
+ }
+ }
+
+ /* ENETC 0 and switch port 2 share the same parallel interface.
+ * Currently, the switch is not supported, so this interface is
+ * used by ENETC 0 by default.
+ */
+ val = netc_reg_read(priv->netcmix, IMX94_EXT_PIN_CONTROL);
+ val |= MAC2_MAC3_SEL;
+ netc_reg_write(priv->netcmix, IMX94_EXT_PIN_CONTROL, val);
+
+ return 0;
+}
+
+static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv)
+{
+ return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK);
+}
+
+static int netc_lock_ierb(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, NETCRR_LOCK);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCSR_STATE),
+ 100, 2000, false, priv->prb, PRB_NETCSR);
+}
+
+static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
+{
+ u32 val;
+
+ netc_reg_write(priv->prb, PRB_NETCRR, 0);
+
+ return read_poll_timeout(netc_reg_read, val, !(val & NETCRR_LOCK),
+ 1000, 100000, true, priv->prb, PRB_NETCRR);
+}
+
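+/* Look up the address of the first PHY listed on the port's local MDIO bus.
+ * A minimal sketch of the expected DT layout (node names are illustrative):
+ *
+ *	mdio {
+ *		ethernet-phy@2 {
+ *			reg = <2>;
+ *		};
+ *	};
+ *
+ * Returns 0 if the port has no "mdio" child node.
+ */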
+static int netc_get_phy_addr(struct device_node *np)
+{
+ struct device_node *mdio_node, *phy_node;
+ u32 addr = 0;
+ int err = 0;
+
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ return 0;
+
+ phy_node = of_get_next_child(mdio_node, NULL);
+ if (!phy_node)
+ goto of_put_mdio_node;
+
+ err = of_property_read_u32(phy_node, "reg", &addr);
+ if (err)
+ goto of_put_phy_node;
+
+ if (addr >= PHY_MAX_ADDR)
+ err = -EINVAL;
+
+of_put_phy_node:
+ of_node_put(phy_node);
+
+of_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return err ? err : addr;
+}
+
+static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ u32 mask = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ u32 addr;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &addr);
+ if (err)
+ return err;
+
+ if (addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mask |= BIT(addr);
+ }
+
+ *phy_mask = mask;
+
+ return 0;
+}
+
+static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,ee00"))
+ continue;
+
+ return netc_parse_emdio_phy_mask(gchild, phy_mask);
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int bus_devfn, addr, err;
+ u32 phy_mask = 0;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ /* Update the port EMDIO PHY address by parsing the PHY properties.
+ * This is needed when using the port EMDIO, and it is harmless when
+ * using the central EMDIO, so apply it in all cases.
+ */
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(gchild);
+ if (addr < 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
+ * so there is no need to set the register.
+ */
+ if (!addr)
+ continue;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(0),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(1),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(2),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ /* EMDIO: No MSI-X interrupt */
+ netc_reg_write(priv->ierb, IERB_EMDIOFAUXR, 0);
+ /* ENETC0 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(0), 0);
+ /* ENETC0 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(0), 1);
+ /* ENETC0 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(1), 2);
+ /* ENETC1 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(1), 3);
+ /* ENETC1 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(2), 5);
+ /* ENETC1 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(3), 6);
+ /* ENETC2 PF */
+ netc_reg_write(priv->ierb, IERB_EFAUXR(2), 4);
+ /* ENETC2 VF0 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(4), 5);
+ /* ENETC2 VF1 */
+ netc_reg_write(priv->ierb, IERB_VFAUXR(5), 6);
+ /* NETC TIMER */
+ netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
+
+ return imx95_enetc_mdio_phyaddr_config(pdev);
+}
+
+static int imx94_get_enetc_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse ENETC offset */
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ return NETC_ENETC_ID(0);
+ case IMX94_ENETC1_BUS_DEVFN:
+ return NETC_ENETC_ID(1);
+ case IMX94_ENETC2_BUS_DEVFN:
+ return NETC_ENETC_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_get_timer_id(struct device_node *np)
+{
+ int bus_devfn = netc_of_pci_get_bus_devfn(np);
+
+ /* Parse the NETC PTP timer ID: timer0 is on bus 0,
+ * while timer1 and timer2 are on bus 1.
+ */
+ switch (bus_devfn) {
+ case IMX94_TIMER0_BUS_DEVFN:
+ return NETC_TIMER_ID(0);
+ case IMX94_TIMER1_BUS_DEVFN:
+ return NETC_TIMER_ID(1);
+ case IMX94_TIMER2_BUS_DEVFN:
+ return NETC_TIMER_ID(2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int imx94_enetc_update_tid(struct netc_blk_ctrl *priv,
+ struct device_node *np)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *timer_np;
+ int eid, tid;
+
+ eid = imx94_get_enetc_id(np);
+ if (eid < 0) {
+ dev_err(dev, "Failed to get ENETC ID\n");
+ return eid;
+ }
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np) {
+ /* If 'ptp-timer' is not present, timer1 is the default
+ * timer for all standalone ENETCs; it sits on the same
+ * PCIe bus as those ENETCs.
+ */
+ tid = NETC_TIMER_ID(1);
+ goto end;
+ }
+
+ tid = imx94_get_timer_id(timer_np);
+ of_node_put(timer_np);
+ if (tid < 0) {
+ dev_err(dev, "Failed to get NETC Timer ID\n");
+ return tid;
+ }
+
+end:
+ netc_reg_write(priv->ierb, IERB_ETBCR(eid), tid);
+
+ return 0;
+}
+
+static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
+ struct device_node *np,
+ u32 phy_mask)
+{
+ struct device *dev = &priv->pdev->dev;
+ int bus_devfn, addr;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(np);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(np);
+ if (addr <= 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int imx94_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 phy_mask = 0;
+ int err;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ err = imx94_enetc_update_tid(priv, gchild);
+ if (err)
+ return err;
+
+ err = imx94_enetc_mdio_phyaddr_config(priv, gchild,
+ phy_mask);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int netc_ierb_init(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ const struct netc_devinfo *devinfo = priv->devinfo;
+ int err;
+
+ if (netc_ierb_is_locked(priv)) {
+ err = netc_unlock_ierb_with_warm_reset(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Unlock IERB failed.\n");
+ return err;
+ }
+ }
+
+ if (devinfo->ierb_init) {
+ err = devinfo->ierb_init(pdev);
+ if (err)
+ return err;
+ }
+
+ err = netc_lock_ierb(priv);
+ if (err) {
+ dev_err(&pdev->dev, "Lock IERB failed.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int netc_prb_show(struct seq_file *s, void *data)
+{
+ struct netc_blk_ctrl *priv = s->private;
+ u32 val;
+
+ val = netc_reg_read(priv->prb, PRB_NETCRR);
+ seq_printf(s, "[PRB NETCRR] Lock:%d SR:%d\n",
+ (val & NETCRR_LOCK) ? 1 : 0,
+ (val & NETCRR_SR) ? 1 : 0);
+
+ val = netc_reg_read(priv->prb, PRB_NETCSR);
+ seq_printf(s, "[PRB NETCSR] State:%d Error:%d\n",
+ (val & NETCSR_STATE) ? 1 : 0,
+ (val & NETCSR_ERROR) ? 1 : 0);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(netc_prb);
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("netc_blk_ctrl", NULL);
+ if (IS_ERR(root))
+ return;
+
+ priv->debugfs_root = root;
+
+ debugfs_create_file("prb", 0444, root, priv, &netc_prb_fops);
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+ debugfs_remove_recursive(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+}
+
+#else
+
+static void netc_blk_ctrl_create_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+
+static void netc_blk_ctrl_remove_debugfs(struct netc_blk_ctrl *priv)
+{
+}
+#endif
+
+static int netc_prb_check_error(struct netc_blk_ctrl *priv)
+{
+ if (netc_reg_read(priv->prb, PRB_NETCSR) & NETCSR_ERROR)
+ return -1;
+
+ return 0;
+}
+
+static const struct netc_devinfo imx95_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx95_netcmix_init,
+ .ierb_init = imx95_ierb_init,
+};
+
+static const struct netc_devinfo imx94_devinfo = {
+ .flags = NETC_HAS_NETCMIX,
+ .netcmix_init = imx94_netcmix_init,
+ .ierb_init = imx94_ierb_init,
+};
+
+static const struct of_device_id netc_blk_ctrl_match[] = {
+ { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo },
+ { .compatible = "nxp,imx94-netc-blk-ctrl", .data = &imx94_devinfo },
+ {},
+};
+MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match);
+
+static int netc_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct netc_devinfo *devinfo;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct netc_blk_ctrl *priv;
+ struct clk *ipg_clk;
+ void __iomem *regs;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+ ipg_clk = devm_clk_get_optional_enabled(dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(ipg_clk),
+ "Set ipg clock failed\n");
+
+ id = of_match_device(netc_blk_ctrl_match, dev);
+ if (!id)
+ return dev_err_probe(dev, -EINVAL, "Cannot match device\n");
+
+ devinfo = (struct netc_devinfo *)id->data;
+ if (!devinfo)
+ return dev_err_probe(dev, -EINVAL, "No device information\n");
+
+ priv->devinfo = devinfo;
+ regs = devm_platform_ioremap_resource_byname(pdev, "ierb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing IERB resource\n");
+
+ priv->ierb = regs;
+ regs = devm_platform_ioremap_resource_byname(pdev, "prb");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing PRB resource\n");
+
+ priv->prb = regs;
+ if (devinfo->flags & NETC_HAS_NETCMIX) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "netcmix");
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Missing NETCMIX resource\n");
+ priv->netcmix = regs;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ if (devinfo->netcmix_init) {
+ err = devinfo->netcmix_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Initializing NETCMIX failed\n");
+ }
+
+ err = netc_ierb_init(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Initializing IERB failed\n");
+
+ if (netc_prb_check_error(priv) < 0)
+ dev_warn(dev, "The current IERB configuration is invalid\n");
+
+ netc_blk_ctrl_create_debugfs(priv);
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ netc_blk_ctrl_remove_debugfs(priv);
+ return dev_err_probe(dev, err, "of_platform_populate failed\n");
+ }
+
+ return 0;
+}
+
+static void netc_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ netc_blk_ctrl_remove_debugfs(priv);
+}
+
+static struct platform_driver netc_blk_ctrl_driver = {
+ .driver = {
+ .name = "nxp-netc-blk-ctrl",
+ .of_match_table = netc_blk_ctrl_match,
+ },
+ .probe = netc_blk_ctrl_probe,
+ .remove = netc_blk_ctrl_remove,
+};
+
+module_platform_driver(netc_blk_ctrl_driver);
+
+MODULE_DESCRIPTION("NXP NETC Blocks Control Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..0c1d343253bf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+ /* The base address of the Control BD Ring must be 128-byte aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ spin_lock_init(&cbdr->ring_lock);
+
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 3: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
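+
+/* A minimal usage sketch; 'dev' and the register layout in 'regs' are
+ * assumed to be set up by the caller beforehand:
+ *
+ *	struct netc_cbdr cbdr;
+ *	int err;
+ *
+ *	err = ntmp_init_cbdr(&cbdr, dev, &regs);
+ *	if (err)
+ *		return err;
+ *	...
+ *	ntmp_free_cbdr(&cbdr);
+ */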
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
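+/* One BD is kept unused so that a full ring can be distinguished from an
+ * empty one (producer index == consumer index means empty).
+ */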
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
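+/* Synchronously submit one command: copy the request BD into the ring,
+ * advance the producer index register, then poll the consumer index until
+ * the hardware catches up. The response is written back into the same BD
+ * slot and copied out to the caller.
+ */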
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Copy out the written-back command BD, because the caller may
+ * need to check other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+ /* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+ }
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+ "Failed to delete entry 0x%x of %s, err: %pe",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of the response data is not
+ * entry_id; for those, return success directly.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set up the MAC address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
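+ /* The response is laid out as the 4-byte entry ID, the STSE statistics
+ * (8 bytes per group) and then one CFGE byte per group; skip ahead to
+ * the CFGE section.
+ */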
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
+
+MODULE_DESCRIPTION("NXP NETC Library");
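+/* Writing NETCRR_LOCK freezes the IERB configuration; the lock has taken
+ * effect once NETCSR_STATE clears. Clearing NETCRR (see
+ * netc_unlock_ierb_with_warm_reset()) triggers a warm reset, whose
+ * completion is signaled by the NETCRR_LOCK bit reading back as 0.
+ */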
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
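+/* A BD slot carries the request header on submission and is overwritten in
+ * place with the response header on completion, hence the union of the two
+ * layouts.
+ */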
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
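+/* e.g. NTMP_LEN(8, 0) encodes an 8-byte request with no response data */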
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 7b4961daa254..fd9a93d02f8e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,14 +14,17 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -110,7 +113,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -237,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
- * The following definitions courtesy of commproc.h, which where
- * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
- */
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
-
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -337,19 +323,22 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are powers of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
*/
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
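+/* Each RX page reserves XDP headroom at the front plus room for the tail
+ * skb_shared_info, so the usable frame size is what remains of the page.
+ */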
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_SIZE 1024 /* Must be power of two */
#define TX_RING_MOD_MASK 511 /* for this to work */
#define BD_ENET_RX_INT 0x00800000
@@ -377,6 +366,9 @@ struct bufdesc_ex {
#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
+ (((X) == 1) ? FEC_ENET_RXF_1 : \
+ FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
@@ -450,7 +442,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -485,7 +477,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
-/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC
+/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -495,6 +487,17 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -508,10 +511,34 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+enum {
+ RX_XDP_REDIRECT = 0,
+ RX_XDP_PASS,
+ RX_XDP_DROP,
+ RX_XDP_TX,
+ RX_XDP_TX_ERRORS,
+ TX_XDP_XMIT,
+ TX_XDP_XMIT_ERRORS,
+
+ /* The following must be the last one */
+ XDP_STATS_TOTAL,
+};
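XDP_STATS_TOTAL is a sentinel, not a counter: it sizes the per-queue stats[] array so new entries can be added above it without touching consumers. A minimal sketch of a loop that relies on it (example_xdp_stats_sum is hypothetical):

/* Hypothetical: sum all per-queue XDP counters using the sentinel. */
static u64 example_xdp_stats_sum(const struct fec_enet_priv_rx_q *rxq)
{
        u64 total = 0;
        int i;

        for (i = 0; i < XDP_STATS_TOTAL; i++)
                total += rxq->stats[i];
        return total;
}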
+
+enum fec_txbuf_type {
+ FEC_TXBUF_T_SKB,
+ FEC_TXBUF_T_XDP_NDO,
+ FEC_TXBUF_T_XDP_TX,
+};
+
+struct fec_tx_buffer {
+ void *buf_p;
+ enum fec_txbuf_type type;
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ struct fec_tx_buffer tx_buf[TX_RING_SIZE];
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
@@ -523,7 +550,15 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct page *rx_buf[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+ u32 stats[XDP_STATS_TOTAL];
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -558,12 +593,14 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
@@ -576,6 +613,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -593,21 +631,18 @@ struct fec_enet_private {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
- unsigned long last_overflow_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
- u32 base_incval;
u32 cycle_speed;
int hwts_rx_en;
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
- unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
@@ -616,12 +651,8 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_eee eee;
unsigned int clk_ref_rate;
- u32 rx_copybreak;
-
/* ptp clock period in ns*/
unsigned int ptp_inc;
@@ -630,16 +661,33 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ bool perout_enable;
+ struct hrtimer perout_timer;
+ u64 perout_stime;
+
+ struct imx_sc_ipc *ipc_handle;
+
+ /* XDP BPF Program */
+ struct bpf_prog *xdp_prog;
+
+ struct {
+ int pps_enable;
+ u64 ns_sys, ns_phc;
+ u32 at_corr;
+ u8 at_inc_corr;
+ } ptp_saved_state;
u64 ethtool_stats[];
};
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
+void fec_ptp_restore_state(struct fec_enet_private *fep);
+void fec_ptp_save_state(struct fec_enet_private *fep);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-void fec_ptp_disable_hwts(struct net_device *ndev);
-int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
-int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bc418b910999..c685a5c0cc51 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,64 +22,69 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <asm/cacheflush.h>
-
#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
-static void fec_enet_itr_coal_init(struct net_device *ndev);
+static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len);
#define DRIVER_NAME "fec"
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
-/* Pause frame feild and FIFO threshold */
-#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
@@ -87,44 +92,53 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+#define FEC_ENET_XDP_PASS 0
+#define FEC_ENET_XDP_CONSUMED BIT(0)
+#define FEC_ENET_XDP_TX BIT(1)
+#define FEC_ENET_XDP_REDIR BIT(2)
+
struct fec_devinfo {
u32 quirks;
};
static const struct fec_devinfo fec_imx25_info = {
.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx27_info = {
- .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_NO_HARD_RESET,
+ FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_mvf600_info = {
- .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
- FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -132,7 +146,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8mq_info = {
@@ -142,7 +157,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8qm_info = {
@@ -152,7 +168,16 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
+};
+
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
@@ -161,60 +186,22 @@ static struct platform_device_id fec_devtype[] = {
.name = DRIVER_NAME,
.driver_data = 0,
}, {
- .name = "imx25-fec",
- .driver_data = (kernel_ulong_t)&fec_imx25_info,
- }, {
- .name = "imx27-fec",
- .driver_data = (kernel_ulong_t)&fec_imx27_info,
- }, {
- .name = "imx28-fec",
- .driver_data = (kernel_ulong_t)&fec_imx28_info,
- }, {
- .name = "imx6q-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6q_info,
- }, {
- .name = "mvf600-fec",
- .driver_data = (kernel_ulong_t)&fec_mvf600_info,
- }, {
- .name = "imx6sx-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6x_info,
- }, {
- .name = "imx6ul-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
- }, {
- .name = "imx8mq-fec",
- .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
- }, {
- .name = "imx8qm-fec",
- .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
- }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
-enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
- IMX27_FEC, /* runs on i.mx27/35/51 */
- IMX28_FEC,
- IMX6Q_FEC,
- MVF600_FEC,
- IMX6SX_FEC,
- IMX6UL_FEC,
- IMX8MQ_FEC,
- IMX8QM_FEC,
-};
-
static const struct of_device_id fec_dt_ids[] = {
- { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
- { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
- { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
- { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
- { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
- { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
- { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
- { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
+ { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
+ { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
+ { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
+ { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
+ { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -248,12 +235,13 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
/* FEC receive acceleration */
-#define FEC_RACC_IPDIS (1 << 1)
-#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_IPDIS BIT(1)
+#define FEC_RACC_PRODIS BIT(2)
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -265,12 +253,10 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
-#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#ifndef CONFIG_M5272
+#define OPT_ARCH_HAS_MAX_FL 1
#else
-#define OPT_FRAME_SIZE 0
+#define OPT_ARCH_HAS_MAX_FL 0
#endif
/* FEC MII MMFR bits definition */
@@ -285,8 +271,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
+#define FEC_ECR_BYTESWP BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP BIT(0)
+#define FEC_RCR_DRT BIT(1)
+#define FEC_RCR_MII BIT(2)
+#define FEC_RCR_PROMISC BIT(3)
+#define FEC_RCR_BC_REJ BIT(4)
+#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RGMII BIT(6)
+#define FEC_RCR_RMII BIT(8)
+#define FEC_RCR_10BASET BIT(9)
+#define FEC_RCR_NLC BIT(30)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD BIT(8)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -299,8 +303,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
-#define COPYBREAK_DEFAULT 256
-
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
@@ -350,16 +352,6 @@ static void swap_buffer(void *bufaddr, int len)
swab32s(buf);
}
-static void swap_buffer2(void *dst_buf, void *src_buf, int len)
-{
- int i;
- unsigned int *src = src_buf;
- unsigned int *dst = dst_buf;
-
- for (i = 0; i < len; i += 4, src++, dst++)
- *dst = swab32p(src);
-}
-
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -381,12 +373,76 @@ static void fec_dump(struct net_device *ndev)
fec16_to_cpu(bdp->cbd_sc),
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
- txq->tx_skbuff[index]);
+ txq->tx_buf[index].buf_p);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
index++;
} while (bdp != txq->bd.base);
}
+/*
+ * Coldfire does not support DMA coherent allocations, and has historically used
+ * a band-aid with a manual flush in fec_enet_rx_queue.
+ */
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle)
+{
+ dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
+}
+#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ return dma_alloc_coherent(dev, size, handle, gfp);
+}
+
+static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle)
+{
+ dma_free_coherent(dev, size, cpu_addr, handle);
+}
+#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
+
+struct fec_dma_devres {
+ size_t size;
+ void *vaddr;
+ dma_addr_t dma_handle;
+};
+
+static void fec_dmam_release(struct device *dev, void *res)
+{
+ struct fec_dma_devres *this = res;
+
+ fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
+}
+
+static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp)
+{
+ struct fec_dma_devres *dr;
+ void *vaddr;
+
+ dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
+ if (!dr)
+ return NULL;
+ vaddr = fec_dma_alloc(dev, size, handle, gfp);
+ if (!vaddr) {
+ devres_free(dr);
+ return NULL;
+ }
+ dr->vaddr = vaddr;
+ dr->dma_handle = *handle;
+ dr->size = size;
+ devres_add(dev, dr);
+ return vaddr;
+}
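fec_dmam_alloc() ties the descriptor memory to device-managed resources, so fec_dmam_release() frees it automatically on unbind and the error paths need no matching free. A minimal usage sketch, assuming a probe-time caller (example_alloc_ring is hypothetical):

static int example_alloc_ring(struct platform_device *pdev, size_t ring_bytes)
{
        dma_addr_t ring_dma;
        void *ring;

        ring = fec_dmam_alloc(&pdev->dev, ring_bytes, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* No explicit free: fec_dmam_release() runs on driver detach. */
        return 0;
}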
+
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
@@ -409,6 +465,49 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = fep->pagepool_order,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = fep->rx_frame_size,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
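The error path unwinds in reverse order of setup: the memory model registration is dropped before the pool is destroyed. A hypothetical teardown helper would mirror that ordering; xdp_rxq_info_is_reg() is the stock check for whether registration happened:

/* Hypothetical mirror of the setup above, for a full teardown. */
static void example_destroy_page_pool(struct fec_enet_priv_rx_q *rxq)
{
        if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
                xdp_rxq_info_unreg(&rxq->xdp_rxq);

        page_pool_destroy(rxq->page_pool);
        rxq->page_pool = NULL;
}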
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
@@ -595,7 +694,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
/* Save skb pointer */
- txq->tx_skbuff[index] = skb;
+ txq->tx_buf[index].buf_p = skb;
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
@@ -613,14 +712,17 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
skb_tx_timestamp(skb);
- /* Make sure the update to bdp and tx_skbuff are performed before
- * txq->bd.cur.
- */
+ /* Make sure the update to bdp is performed before txq->bd.cur. */
wmb();
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
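The guarded trigger replaces the old unconditional writel(): on parts with ERR007885 the TDAR write is only issued when the register reads back inactive, the repeated reads giving a pending trigger time to latch (this rationale is an assumption; the hunk itself does not state it). The same pattern as a hypothetical helper:

static void example_kick_tx(struct fec_enet_private *fep,
                            struct fec_enet_priv_tx_q *txq)
{
        int i;

        if (!(fep->quirks & FEC_QUIRK_ERR007885)) {
                writel(0, txq->bd.reg_desc_active);
                return;
        }

        /* re-arm only if one of four reads sees the trigger inactive */
        for (i = 0; i < 4; i++)
                if (!readl(txq->bd.reg_desc_active))
                        break;
        if (i < 4)
                writel(0, txq->bd.reg_desc_active);
}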
@@ -656,7 +758,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -691,7 +793,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
@@ -718,7 +820,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -746,6 +848,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len, total_len, data_left;
struct bufdesc *bdp = txq->bd.cur;
+ struct bufdesc *tmp_bdp;
+ struct bufdesc_ex *ebdp;
struct tso_t tso;
unsigned int index = 0;
int ret;
@@ -803,7 +907,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
}
/* Save skb pointer */
- txq->tx_skbuff[index] = skb;
+ txq->tx_buf[index].buf_p = skb;
skb_tx_timestamp(skb);
txq->bd.cur = bdp;
@@ -819,7 +923,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
return 0;
err_release:
- /* TODO: Release all used data descriptors for TSO */
+ /* Release all used data descriptors for TSO */
+ tmp_bdp = txq->bd.cur;
+
+ while (tmp_bdp != bdp) {
+ /* Unmap data buffers */
+ if (tmp_bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(tmp_bdp->cbd_bufaddr),
+ fec16_to_cpu(tmp_bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ /* Clear standard buffer descriptor fields */
+ tmp_bdp->cbd_sc = 0;
+ tmp_bdp->cbd_datlen = 0;
+ tmp_bdp->cbd_bufaddr = 0;
+
+ /* Handle extended descriptor if enabled */
+ if (fep->bufdesc_ex) {
+ ebdp = (struct bufdesc_ex *)tmp_bdp;
+ ebdp->cbd_esc = 0;
+ }
+
+ tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd);
+ }
+
+ dev_kfree_skb_any(skb);
+
return ret;
}
@@ -879,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -893,23 +1024,43 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
- if (bdp->cbd_bufaddr &&
- !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- if (txq->tx_skbuff[i]) {
- dev_kfree_skb_any(txq->tx_skbuff[i]);
- txq->tx_skbuff[i] = NULL;
+ if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ if (txq->tx_buf[i].buf_p)
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+
+ if (txq->tx_buf[i].buf_p)
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ } else {
+ struct page *page = txq->tx_buf[i].buf_p;
+
+ if (page)
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
+
+ txq->tx_buf[i].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -933,7 +1084,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -952,24 +1103,40 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
-static void fec_enet_reset_skb(struct net_device *ndev)
+/* Whack a reset. We should wait for this.
+ * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_tx_q *txq;
- int i, j;
-
- for (i = 0; i < fep->num_tx_queues; i++) {
- txq = fep->tx_queue[i];
+ u32 val;
- for (j = 0; j < txq->bd.ring_size; j++) {
- if (txq->tx_skbuff[j]) {
- dev_kfree_skb_any(txq->tx_skbuff[j]);
- txq->tx_skbuff[j] = NULL;
- }
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
}
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
}
}
+static void fec_set_hw_mac_addr(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+}
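fec_set_hw_mac_addr() packs the six address bytes most-significant-first into the two registers, replacing the old cpu_to_be32() memcpy dance. A standalone sketch of the packing with the example address 00:04:9f:11:22:33 (the address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x11, 0x22, 0x33 };
        uint32_t lo = mac[3] | mac[2] << 8 | mac[1] << 16 |
                      (uint32_t)mac[0] << 24;
        uint32_t hi = mac[5] << 16 | (uint32_t)mac[4] << 24;

        /* prints FEC_ADDR_LOW=0x00049f11 FEC_ADDR_HIGH=0x22330000 */
        printf("FEC_ADDR_LOW=0x%08x FEC_ADDR_HIGH=0x%08x\n", lo, hi);
        return 0;
}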
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -979,31 +1146,22 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 temp_mac[2];
- u32 rcntl = OPT_FRAME_SIZE | 0x04;
- u32 ecntl = 0x2; /* ETHEREN */
+ u32 ecntl = FEC_ECR_ETHEREN;
+ u32 rcntl = FEC_RCR_MII;
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ if (OPT_ARCH_HAS_MAX_FL)
+ rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
+
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
+ fec_set_hw_mac_addr(ndev);
/* Clear any outstanding interrupt, except MDIO. */
writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
@@ -1012,16 +1170,13 @@ fec_restart(struct net_device *ndev)
fec_enet_enable_ring(ndev);
- /* Reset tx SKB buffers. */
- fec_enet_reset_skb(ndev);
-
/* Enable MII mode */
if (fep->full_duplex == DUPLEX_FULL) {
/* FD enable */
writel(0x04, fep->hwp + FEC_X_CNTRL);
} else {
/* No Rcv on Xmit */
- rcntl |= 0x02;
+ rcntl |= FEC_RCR_DRT;
writel(0x0, fep->hwp + FEC_X_CNTRL);
}
@@ -1040,7 +1195,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
}
#endif
@@ -1050,27 +1205,24 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* Enable flow control and length check */
- rcntl |= 0x40000000 | 0x00000020;
+ rcntl |= FEC_RCR_NLC | FEC_RCR_FLOWCTL;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
- fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rcntl |= (1 << 6);
+ if (phy_interface_mode_is_rgmii(fep->phy_interface))
+ rcntl |= FEC_RCR_RGMII;
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- rcntl |= (1 << 8);
+ rcntl |= FEC_RCR_RMII;
else
- rcntl &= ~(1 << 8);
+ rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
+ rcntl &= ~FEC_RCR_10BASET;
else
- rcntl |= (1 << 9);
+ rcntl |= FEC_RCR_10BASET;
}
} else {
#ifdef FEC_MIIGSK_ENR
@@ -1103,7 +1255,7 @@ fec_restart(struct net_device *ndev)
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
ndev->phydev && ndev->phydev->pause)) {
- rcntl |= FEC_ENET_FCE;
+ rcntl |= FEC_RCR_FLOWCTL;
/* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
@@ -1114,7 +1266,7 @@ fec_restart(struct net_device *ndev)
/* OPD */
writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
} else {
- rcntl &= ~FEC_ENET_FCE;
+ rcntl &= ~FEC_RCR_FLOWCTL;
}
#endif /* !defined(CONFIG_M5272) */
@@ -1129,13 +1281,23 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
- ecntl |= (1 << 8);
- /* enable ENET store and forward mode */
- writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ ecntl |= FEC_ECR_BYTESWP;
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In such cases, if the MTU exceeds
+ * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
+ (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
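The threshold arithmetic: PKT_MAXBUF_SIZE is round_down(2048 - 64, 64) = 1984, so store-and-forward is kept up to an MTU of 1984 - 14 (ETH_HLEN) - 4 (ETH_FCS_LEN) = 1966 bytes, and anything larger runs the Tx FIFO in cut-through mode at the 0xF watermark. A quick check:

#include <stdio.h>

#define ROUND_DOWN(x, a)  ((x) / (a) * (a))

int main(void)
{
        int pkt_maxbuf = ROUND_DOWN(2048 - 64, 64);   /* 1984 */
        int eth_hlen = 14, eth_fcs_len = 4;

        /* MTUs above this switch the Tx FIFO to cut-through mode. */
        printf("store-and-forward up to MTU %d\n",
               pkt_maxbuf - eth_hlen - eth_fcs_len);   /* 1966 */
        return 0;
}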
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1153,8 +1315,10 @@ fec_restart(struct net_device *ndev)
writel(ecntl, fep->hwp + FEC_ECNTRL);
fec_enet_active_rxring(ndev);
- if (fep->bufdesc_ex)
+ if (fep->bufdesc_ex) {
fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
+ }
/* Enable interrupts we wish to service */
if (fep->link)
@@ -1163,8 +1327,36 @@ fec_restart(struct net_device *ndev)
writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
- fec_enet_itr_coal_init(ndev);
+ if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
+ fec_enet_itr_coal_set(ndev);
+}
+
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
@@ -1182,14 +1374,31 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1200,35 +1409,29 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
- } else {
- writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- fec_enet_stop_mode(fep, true);
- }
+ if (fep->bufdesc_ex)
+ fec_ptp_save_state(fep);
+
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
-}
+ if (fep->bufdesc_ex) {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= FEC_ECR_EN1588;
+ writel(val, fep->hwp + FEC_ECNTRL);
+
+ fec_ptp_start_cyclecounter(ndev);
+ fec_ptp_restore_state(fep);
+ }
+}
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
@@ -1276,9 +1479,10 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
}
static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep;
+ struct xdp_frame *xdpf;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -1286,6 +1490,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
struct netdev_queue *nq;
int index = 0;
int entries_free;
+ struct page *page;
+ int frame_len;
fep = netdev_priv(ndev);
@@ -1306,16 +1512,45 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &txq->bd);
- skb = txq->tx_skbuff[index];
- txq->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
- bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb)
- goto skb_done;
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+ skb = txq->tx_buf[index].buf_p;
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ if (!skb)
+ goto tx_buf_done;
+ } else {
+ /* Tx processing cannot call any XDP (or page pool) APIs if
+ * the "budget" is 0. NAPI is called with a budget of 0
+ * (such as netpoll) when we may be in an IRQ context, and
+ * the page pool must not be used from IRQ context.
+ */
+ if (unlikely(!budget))
+ break;
+
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdpf = txq->tx_buf[index].buf_p;
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ } else {
+ page = txq->tx_buf[index].buf_p;
+ }
+
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
+ if (unlikely(!txq->tx_buf[index].buf_p)) {
+ txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+ goto tx_buf_done;
+ }
+
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
+ }
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1334,21 +1569,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- }
- /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
- * are to time stamp the packet, so we still need to check time
- * stamping enabled flag.
- */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
- fep->hwts_tx_en) &&
- fep->bufdesc_ex) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
- fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
- skb_tstamp_tx(skb, &shhwtstamps);
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+ ndev->stats.tx_bytes += skb->len;
+ else
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1357,10 +1582,36 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
if (status & BD_ENET_TX_DEF)
ndev->stats.collisions++;
- /* Free the sk buffer associated with this last transmit */
- dev_kfree_skb_any(skb);
-skb_done:
- /* Make sure the update to bdp and tx_skbuff are performed
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+ /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+ * are to time stamp the packet, so we still need to check time
+ * stamping enabled flag.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) && fep->bufdesc_ex) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Free the sk buffer associated with this last transmit */
+ napi_consume_skb(skb, budget);
+ } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame_rx_napi(xdpf);
+ } else { /* recycle pages of XDP_TX frames */
+ /* dma_sync_size is 0 because XDP_TX already synced the DMA for_device */
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
+ }
+
+ txq->tx_buf[index].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
+tx_buf_done:
+ /* Make sure the update to bdp and tx_buf are performed
* before dirty_tx
*/
wmb();
@@ -1384,60 +1635,112 @@ skb_done:
writel(0, txq->bd.reg_desc_active);
}
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int i;
/* Make sure that AVB queues are processed first. */
for (i = fep->num_tx_queues - 1; i >= 0; i--)
- fec_enet_tx_queue(ndev, i);
+ fec_enet_tx_queue(ndev, i, budget);
}
-static int
-fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- int off;
-
- off = ((unsigned long)skb->data) & fep->rx_align;
- if (off)
- skb_reserve(skb, fep->rx_align + 1 - off);
+ struct page *new_page;
+ dma_addr_t phys_addr;
- bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
- if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
- if (net_ratelimit())
- netdev_err(ndev, "Rx DMA memory map failed\n");
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (unlikely(!new_page))
return -ENOMEM;
- }
+
+ rxq->rx_buf[index] = new_page;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
return 0;
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct sk_buff *new_skb;
-
- if (length > fep->rx_copybreak)
- return false;
-
- new_skb = netdev_alloc_skb(ndev, length);
- if (!new_skb)
- return false;
-
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (!swap)
- memcpy(new_skb->data, (*skb)->data, length);
- else
- swap_buffer2(new_skb->data, (*skb)->data, length);
- *skb = new_skb;
+ unsigned int sync, len = xdp->data_end - xdp->data;
+ u32 ret = FEC_ENET_XDP_PASS;
+ struct page *page;
+ int err;
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due to xdp_adjust_tail and xdp_adjust_head, the for_device DMA sync
+ * must cover the maximum length the CPU has touched.
+ */
+ sync = xdp->data_end - xdp->data;
+ sync = max(sync, len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ ret = FEC_ENET_XDP_PASS;
+ break;
+
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(fep->netdev, xdp, prog);
+ if (unlikely(err))
+ goto xdp_err;
+
+ ret = FEC_ENET_XDP_REDIR;
+ break;
+
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ goto xdp_err;
+ }
+
+ ret = FEC_ENET_XDP_TX;
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+ fallthrough;
+
+ case XDP_ABORTED:
+ fallthrough; /* handle aborts by dropping packet */
+
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+xdp_err:
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ if (act != XDP_DROP)
+ trace_xdp_exception(fep->netdev, prog, act);
+ break;
+ }
- return true;
+ return ret;
+}
+
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
}
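The memmove shifts the two 6-byte MAC addresses forward by VLAN_HLEN (4), overwriting the 802.1Q tag in place; skb_pull() then drops the stale front bytes. A userspace illustration of the same byte shuffle (frame contents are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* dst MAC(6) | src MAC(6) | tag(4) | ethertype + payload */
        char frame[] = "DDDDDDSSSSSSTTTTEEpayload";

        memmove(frame + 4, frame, 12);  /* VLAN_HLEN = 4, 2 * ETH_ALEN = 12 */

        /* skb_pull() equivalent: frame now starts 4 bytes in, tag gone */
        printf("%s\n", frame + 4);      /* DDDDDDSSSSSSEEpayload */
        return 0;
}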
/* During a receive, the bd_rx.cur points to the current incoming buffer.
@@ -1446,25 +1749,40 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+ u32 data_start = FEC_ENET_XDP_HEADROOM;
+ int cpu = smp_processor_id();
+ struct xdp_buff xdp;
+ struct page *page;
+ __fec32 cbd_bufaddr;
+ u32 sub_len = 4;
+
+ /* If the controller has the FEC_QUIRK_HAS_RACC quirk, the
+ * FEC_RACC_SHIFT16 bit is set by default in the probe function.
+ */
+ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+ data_start += 2;
+ sub_len += 2;
+ }
-#ifdef CONFIG_M532x
+#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
+ /*
+ * Hacky flush of all caches instead of using the DMA API for the TSO
+ * headers.
+ */
flush_cache_all();
#endif
rxq = fep->rx_queue[queue_id];
@@ -1473,6 +1791,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1480,7 +1799,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
@@ -1512,39 +1831,58 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_buf[index];
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+
+ if (xdp_prog) {
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract 16bit shift and FCS */
+ xdp_prepare_buff(&xdp, page_address(page),
+ data_start, pkt_len - sub_len, false);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
+ xdp_result |= ret;
+ if (ret != FEC_ENET_XDP_PASS)
+ goto rx_processing_done;
+ }
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rxq->page_pool, page);
+ ndev->stats.rx_dropped++;
+
+ netdev_err_once(ndev, "build_skb failed!\n");
+ goto rx_processing_done;
}
- prefetch(skb->data - NET_IP_ALIGN);
- skb_put(skb, pkt_len - 4);
- data = skb->data;
+ skb_reserve(skb, data_start);
+ skb_put(skb, pkt_len - sub_len);
+ skb_mark_for_recycle(skb);
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
+ if (unlikely(need_swap)) {
+ u8 *data;
-#if !defined(CONFIG_M5272)
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- data = skb_pull_inline(skb, 2);
-#endif
+ data = page_address(page) + FEC_ENET_XDP_HEADROOM;
+ swap_buffer(data, pkt_len);
+ }
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1552,20 +1890,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1584,25 +1911,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -1633,6 +1944,10 @@ rx_processing_done:
writel(0, rxq->bd.reg_desc_active);
}
rxq->bd.cur = bdp;
+
+ if (xdp_result & FEC_ENET_XDP_REDIR)
+ xdp_do_flush();
+
return pkt_received;
}
@@ -1643,7 +1958,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
@@ -1690,7 +2005,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
do {
done += fec_enet_rx(ndev, budget - done);
- fec_enet_tx(ndev);
+ fec_enet_tx(ndev, budget);
} while ((done < budget) && fec_enet_collect_events(fep));
if (done < budget) {
@@ -1779,6 +2094,37 @@ static int fec_get_mac(struct net_device *ndev)
/*
* Phy section
*/
+
+/* The LPI sleep Ts count is based on the tx clock (clk_ref).
+ * The LPI sleep count value = X us / cycle_ns.
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int sleep_cycle, wake_cycle;
+
+ if (enable) {
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
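The conversion is plain unit arithmetic: at an assumed 125 MHz clk_ref, one microsecond is 125 cycles, so a 1000 us LPI timer programs 125000 cycles into both FEC_LPI_SLEEP and FEC_LPI_WAKE. A quick check:

#include <stdio.h>

static int us_to_tx_cycle(int us, int clk_ref_rate)
{
        return us * (clk_ref_rate / 1000) / 1000;
}

int main(void)
{
        /* 1000 us at an assumed 125 MHz clk_ref -> 125000 cycles */
        printf("%d\n", us_to_tx_cycle(1000, 125000000));
        return 0;
}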
+
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1810,6 +2156,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@@ -1817,8 +2164,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@@ -1847,47 +2199,73 @@ static int fec_enet_mdio_wait(struct fec_enet_private *fep)
return ret;
}
-static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret = 0, frame_start, frame_addr, frame_op;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a read op */
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ goto out;
+ }
- frame_op = FEC_MMFR_OP_READ_C45;
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
- } else {
- /* C22 read */
- frame_op = FEC_MMFR_OP_READ;
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+out:
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret = 0, frame_start, frame_op;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
+ frame_op = FEC_MMFR_OP_READ_C45;
+
/* start a read op */
writel(frame_start | frame_op |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -1899,51 +2277,73 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
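The C45 access is two framed transactions: an address cycle that latches the 16-bit register number, then a read cycle against the latched address. A condensed sketch of the sequence implemented above, reusing this driver's FEC_MMFR_* helpers (example_c45_read is hypothetical):

static int example_c45_read(struct fec_enet_private *fep, int phy,
                            int devad, int reg)
{
        /* phase 1: latch the 16-bit register address */
        writel(FEC_MMFR_ST_C45 | FEC_MMFR_OP_ADDR_WRITE |
               FEC_MMFR_PA(phy) | FEC_MMFR_RA(devad) |
               FEC_MMFR_TA | (reg & 0xFFFF), fep->hwp + FEC_MII_DATA);
        if (fec_enet_mdio_wait(fep))
                return -ETIMEDOUT;

        /* phase 2: clock out the read from the latched address */
        writel(FEC_MMFR_ST_C45 | FEC_MMFR_OP_READ_C45 |
               FEC_MMFR_PA(phy) | FEC_MMFR_RA(devad) |
               FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
        if (fec_enet_mdio_wait(fep))
                return -ETIMEDOUT;

        return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}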
-static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret, frame_start, frame_addr;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a write op */
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
- } else {
- /* C22 write */
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum, u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret, frame_start;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
/* start a write op */
writel(frame_start | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | FEC_MMFR_DATA(value),
- fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -1951,7 +2351,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
netdev_err(fep->netdev, "MDIO write timeout\n");
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1974,7 +2373,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
- put_device(&phy_dev->mdio.dev);
+ if (phy_dev)
+ put_device(&phy_dev->mdio.dev);
}
}
@@ -2070,11 +2470,8 @@ static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
- char mdio_bus_id[MII_BUS_ID_SIZE];
- char phy_name[MII_BUS_ID_SIZE + 3];
- int phy_id;
- int dev_id = fep->dev_id;
+ struct phy_device *phy_dev;
+ int ret;
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
@@ -2086,30 +2483,28 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
} else {
/* check for attached phy */
- for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
- if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
- continue;
- if (dev_id--)
- continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
- break;
- }
+ phy_dev = phy_find_first(fep->mii_bus);
+ if (fep->dev_id && phy_dev)
+ phy_dev = phy_find_next(fep->mii_bus, phy_dev);
- if (phy_id >= PHY_MAX_ADDR) {
+ if (!phy_dev) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
- phy_id = 0;
+ phy_dev = fixed_phy_register_100fd();
+ if (IS_ERR(phy_dev)) {
+ netdev_err(ndev, "could not register fixed PHY\n");
+ return PTR_ERR(phy_dev);
+ }
}
- snprintf(phy_name, sizeof(phy_name),
- PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
- fep->phy_interface);
- }
+ ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link,
+ fep->phy_interface);
+ if (ret) {
+ if (phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
+ netdev_err(ndev, "could not attach to PHY\n");
+ return ret;
+ }
- if (IS_ERR(phy_dev)) {
- netdev_err(ndev, "could not attach to PHY\n");
- return PTR_ERR(phy_dev);
}
/* mask with MAC supported features */
@@ -2117,18 +2512,17 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ phy_support_eee(phy_dev);
+
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = 1;
-
phy_attached_info(phy_dev);
return 0;
@@ -2140,6 +2534,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
bool suppress_preamble = false;
+ struct phy_device *phydev;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
@@ -2241,8 +2636,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
}
fep->mii_bus->name = "fec_enet_mii_bus";
- fep->mii_bus->read = fec_enet_mdio_read;
- fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->read = fec_enet_mdio_read_c22;
+ fep->mii_bus->write = fec_enet_mdio_write_c22;
+ if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ }
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
@@ -2253,6 +2652,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
goto err_out_free_mdiobus;
of_node_put(node);
+ /* find all the PHY devices on the bus and set mac_managed_pm to true */
+ mdiobus_for_each_phy(fep->mii_bus, phydev)
+ phydev->mac_managed_pm = true;
+
mii_cnt++;
/* save fec0 mii_bus */
@@ -2281,9 +2684,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2300,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can safely be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2333,6 +2734,31 @@ static u32 fec_enet_register_offset[] = {
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
+/* Register list for the i.MX6UL */
+static u32 fec_enet_register_offset_6ul[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
@@ -2351,13 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+ u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+ if (of_machine_is_compatible("fsl,imx6ul")) {
+ reg_list = fec_enet_register_offset_6ul;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+ }
+#endif
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2366,8 +2801,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
- for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i];
+ for (i = 0; i < reg_cnt; i++) {
+ off = reg_list[i];
if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
!(fep->quirks & FEC_QUIRK_HAS_FRREG))
@@ -2377,27 +2812,22 @@ static void fec_enet_get_regs(struct net_device *ndev,
buf[off] = readl(&theregs[off]);
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
static int fec_enet_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct fec_enet_private *fep = netdev_priv(ndev);
if (fep->bufdesc_ex) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (fep->ptp_clock)
info->phc_index = ptp_clock_index(fep->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -2530,6 +2960,16 @@ static const struct fec_stat {
#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+ "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
+ "rx_xdp_pass", /* RX_XDP_PASS, */
+ "rx_xdp_drop", /* RX_XDP_DROP, */
+ "rx_xdp_tx", /* RX_XDP_TX, */
+ "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
+ "tx_xdp_xmit", /* TX_XDP_XMIT, */
+ "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
+};
+
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -2539,6 +2979,42 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}
+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+ u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ xdp_stats[j] += rxq->stats[j];
+ }
+
+ memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+#ifdef CONFIG_PAGE_POOL_STATS
+ struct page_pool_stats stats = {};
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ if (!rxq->page_pool)
+ continue;
+
+ page_pool_get_stats(rxq->page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+#endif
+}
+
static void fec_enet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2548,6 +3024,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
fec_enet_update_ethtool_stats(dev);
memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+ data += FEC_STATS_SIZE / sizeof(u64);
+
+ fec_enet_get_xdp_stats(fep, data);
+ data += XDP_STATS_TOTAL;
+
+ fec_enet_page_pool_stats(fep, data);
}
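
The ethtool stats buffer is now three consecutive u64 sections, matching the count returned by fec_enet_get_sset_count(); a sketch of the resulting layout (section lengths illustrative):

/*
 * data[0 .. ARRAY_SIZE(fec_stats) - 1]   hardware MIB counters
 * data[.. + XDP_STATS_TOTAL - 1]         XDP counters, summed over RX queues
 * data[.. + page pool stats count - 1]   page pool stats (CONFIG_PAGE_POOL_STATS)
 */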
static void fec_enet_get_strings(struct net_device *netdev,
@@ -2556,9 +3038,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- fec_stats[i].name, ETH_GSTRING_LEN);
+		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+			ethtool_puts(&data, fec_stats[i].name);
+		for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++)
+			ethtool_puts(&data, fec_xdp_stat_strs[i]);
+ page_pool_ethtool_stats_get_strings(data);
+
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -2568,9 +3055,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
+ int count;
+
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fec_stats);
+ count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+
case ETH_SS_TEST:
return net_selftest_get_count();
default:
@@ -2581,7 +3073,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- int i;
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
/* Disable MIB statistics counters */
writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -2589,6 +3082,12 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
writel(0, fep->hwp + fec_stats[i].offset);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ rxq->stats[j] = 0;
+ }
+
/* Don't disable MIB statistics counters */
writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
@@ -2619,27 +3118,25 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int rx_itr, tx_itr;
+ u32 rx_itr = 0, tx_itr = 0;
+ int rx_ictt, tx_ictt;
- /* Must be greater than zero to avoid unpredictable behavior */
- if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
- !fep->tx_time_itr || !fep->tx_pkts_itr)
- return;
-
- /* Select enet system clock as Interrupt Coalescing
- * timer Clock Source
- */
- rx_itr = FEC_ITR_CLK_SEL;
- tx_itr = FEC_ITR_CLK_SEL;
+ rx_ictt = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ tx_ictt = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
- /* set ICFT and ICTT */
- rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
- rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
- tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
- tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+ if (rx_ictt > 0 && fep->rx_pkts_itr > 1) {
+		/* Enable, using the ENET system clock as the coalescing timer clock source */
+ rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(rx_ictt);
+ }
- rx_itr |= FEC_ITR_EN;
- tx_itr |= FEC_ITR_EN;
+ if (tx_ictt > 0 && fep->tx_pkts_itr > 1) {
+		/* Enable, using the ENET system clock as the coalescing timer clock source */
+ tx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL;
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(tx_ictt);
+ }
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
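
As a worked example of the microsecond-to-tick conversion feeding rx_ictt and tx_ictt above (fec_enet_us_to_itr_clock() divides the ENET clock by 64; the numbers below are illustrative):

/* itr_clk_rate = 66 MHz, rx_time_itr = 1000 us:
 *   rx_ictt = 1000 * (66000000 / 64000) / 1000 = 1031 ticks
 * so the RX interrupt fires roughly 1 ms after the first packet unless
 * FEC_ITR_ICFT(rx_pkts_itr) packets arrive first.
 */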
@@ -2715,101 +3212,10 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-static void fec_enet_itr_coal_init(struct net_device *ndev)
-{
- struct ethtool_coalesce ec;
-
- ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
-}
-
-static int fec_enet_get_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = fep->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int fec_enet_set_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- fep->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-/* LPI Sleep Ts count base on tx clk (clk_ref).
- * The lpi sleep cnt value = X us / (cycle_ns).
- */
-static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return us * (fep->clk_ref_rate / 1000) / 1000;
-}
-
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- unsigned int sleep_cycle, wake_cycle;
- int ret = 0;
-
- if (enable) {
- ret = phy_init_eee(ndev->phydev, 0);
- if (ret)
- return ret;
-
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
- wake_cycle = sleep_cycle;
- } else {
- sleep_cycle = 0;
- wake_cycle = 0;
- }
-
- p->tx_lpi_enabled = enable;
- p->eee_enabled = enable;
- p->eee_active = enable;
-
- writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
- writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
-
- return 0;
-}
-
static int
-fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -2817,20 +3223,13 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->eee_enabled = p->eee_enabled;
- edata->eee_active = p->eee_active;
- edata->tx_lpi_timer = p->tx_lpi_timer;
- edata->tx_lpi_enabled = p->tx_lpi_enabled;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int
-fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- int ret = 0;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -2838,17 +3237,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
- if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
- !edata->tx_lpi_timer)
- ret = fec_enet_eee_mode_set(ndev, false);
- else
- ret = fec_enet_eee_mode_set(ndev, true);
-
- if (ret)
- return ret;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
@@ -2877,15 +3265,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
- if (device_may_wakeup(&ndev->dev)) {
+ if (device_may_wakeup(&ndev->dev))
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->wake_irq > 0)
- enable_irq_wake(fep->wake_irq);
- } else {
+ else
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->wake_irq > 0)
- disable_irq_wake(fep->wake_irq);
- }
return 0;
}
@@ -2908,8 +3291,6 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_sset_count = fec_enet_get_sset_count,
#endif
.get_ts_info = fec_enet_get_ts_info,
- .get_tunable = fec_enet_get_tunable,
- .set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
.get_eee = fec_enet_get_eee,
@@ -2919,58 +3300,27 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.self_test = net_selftest,
};
-static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- if (fep->bufdesc_ex) {
- bool use_fec_hwts = !phy_has_hwtstamp(phydev);
-
- if (cmd == SIOCSHWTSTAMP) {
- if (use_fec_hwts)
- return fec_ptp_set(ndev, rq);
- fec_ptp_disable_hwts(ndev);
- } else if (cmd == SIOCGHWTSTAMP) {
- if (use_fec_hwts)
- return fec_ptp_get(ndev, rq);
- }
- }
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+ false);
+
+ for (i = 0; i < XDP_STATS_TOTAL; i++)
+ rxq->stats[i] = 0;
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -2978,9 +3328,25 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (i = 0; i < txq->bd.ring_size; i++) {
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
- skb = txq->tx_skbuff[i];
- txq->tx_skbuff[i] = NULL;
- dev_kfree_skb(skb);
+
+ if (!txq->tx_buf[i].buf_p) {
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ continue;
+ }
+
+ if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+ dev_kfree_skb(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ } else {
+ struct page *page = txq->tx_buf[i].buf_p;
+
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
+ }
+
+ txq->tx_buf[i].buf_p = NULL;
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
}
}
}
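
Each tx_buf entry carries a type tag recording how it was filled, so teardown here (and TX completion) can pick the matching release routine; in brief:

/*
 * FEC_TXBUF_T_SKB      -> dev_kfree_skb()      (normal stack transmit)
 * FEC_TXBUF_T_XDP_NDO  -> xdp_return_frame()   (frame from ndo_xdp_xmit)
 * FEC_TXBUF_T_XDP_TX   -> page_pool_put_page() (page from the local RX pool)
 */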
@@ -2994,10 +3360,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++)
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
txq = fep->tx_queue[i];
- dma_free_coherent(&fep->pdev->dev,
- txq->bd.ring_size * TSO_HEADER_SIZE,
- txq->tso_hdrs,
- txq->tso_hdrs_dma);
+ fec_dma_free(&fep->pdev->dev,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
}
for (i = 0; i < fep->num_rx_queues; i++)
@@ -3025,13 +3390,11 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- txq->tx_wake_threshold =
- (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+ txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
- txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+ txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
txq->bd.ring_size * TSO_HEADER_SIZE,
- &txq->tso_hdrs_dma,
- GFP_KERNEL);
+ &txq->tso_hdrs_dma, GFP_KERNEL);
if (!txq->tso_hdrs) {
ret = -ENOMEM;
goto alloc_failed;
@@ -3060,24 +3423,43 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
+	/* Depending on the platform, the RX buffer must be 64-, 16- or
+	 * 4-byte aligned. Since the page pool was introduced into the
+	 * driver, the RX buffer address is always the page address plus
+	 * FEC_ENET_XDP_HEADROOM (256 bytes), which satisfies all of these
+	 * requirements. To keep future changes to FEC_ENET_XDP_HEADROOM
+	 * from violating this hardware limitation, a BUILD_BUG_ON() check
+	 * verifies that the headroom provides the required alignment.
+	 */
+ BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3090,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3126,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -3202,6 +3584,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3216,7 +3601,6 @@ err_enet_mii_probe:
err_enet_alloc:
fec_enet_clk_enable(ndev, false);
clk_enable:
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
return ret;
@@ -3226,8 +3610,9 @@ static int
fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
- phy_stop(ndev->phydev);
+ phy_stop(phy_dev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -3235,7 +3620,10 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(ndev->phydev);
+ phy_disconnect(phy_dev);
+
+ if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev))
+ fixed_phy_unregister(phy_dev);
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_unused();
@@ -3243,8 +3631,10 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
- pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
@@ -3317,7 +3707,6 @@ static void set_multicast_list(struct net_device *ndev)
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (addr) {
@@ -3334,36 +3723,10 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (!netif_running(ndev))
return 0;
- writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
- (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
- fep->hwp + FEC_ADDR_LOW);
- writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
- fep->hwp + FEC_ADDR_HIGH);
- return 0;
-}
+ fec_set_hw_mac_addr(ndev);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
-
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
+ return 0;
}
-#endif
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
@@ -3404,35 +3767,280 @@ static int fec_set_features(struct net_device *netdev,
return 0;
}
-static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
-{
- struct vlan_ethhdr *vhdr;
- unsigned short vlan_TCI = 0;
-
- if (skb->protocol == htons(ETH_P_ALL)) {
- vhdr = (struct vlan_ethhdr *)(skb->data);
- vlan_TCI = ntohs(vhdr->h_vlan_TCI);
- }
-
- return vlan_TCI;
-}
-
static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u16 vlan_tag;
+ u16 vlan_tag = 0;
if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
return netdev_pick_tx(ndev, skb, NULL);
- vlan_tag = fec_enet_get_raw_vlan_tci(skb);
- if (!vlan_tag)
+	/* The VLAN tag is present in the packet payload. */
+ if (eth_type_vlan(skb->protocol)) {
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
+
+ vlan_tag = ntohs(vhdr->h_vlan_TCI);
+	/* The VLAN tag is present in the skb but not yet pushed into the payload. */
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_tag = skb->vlan_tci;
+ } else {
return vlan_tag;
+ }
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
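
Shifting the TCI right by 13 keeps only the 3-bit PCP (priority) field, so the lookup table has eight entries; the mapping it indexes for the 3-queue AVB setup looks like this (values as in the driver table, treat as illustrative):

/*
 * PCP:    0  1  2  3  4  5  6  7
 * queue:  0  0  1  1  1  2  2  2      (fec_enet_vlan_pri_to_queue[])
 */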
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ bool is_run = netif_running(dev);
+ struct bpf_prog *old_prog;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+		/* There is no need to support SoCs that require the frame
+		 * swap: XDP performance there would be no better than skb
+		 * mode.
+		 */
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ return -EOPNOTSUPP;
+
+ if (!bpf->prog)
+ xdp_features_clear_redirect_target(dev);
+
+ if (is_run) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(dev);
+ }
+
+ old_prog = xchg(&fep->xdp_prog, bpf->prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ fec_restart(dev);
+
+ if (is_run) {
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(dev);
+ }
+
+ if (bpf->prog)
+ xdp_features_set_redirect_target(dev, false);
+
+ return 0;
+
+ case XDP_SETUP_XSK_POOL:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
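
With .ndo_bpf wired up, any standard XDP object can be attached to the interface; a minimal program for smoke-testing the path (hypothetical file xdp_pass.c, built with clang -O2 -target bpf and loaded with "ip link set dev eth0 xdp obj xdp_pass.o sec xdp"):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_prog(struct xdp_md *ctx)
{
	/* Touch nothing: hand every frame on to the regular stack. */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";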
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq,
+ void *frame, u32 dma_sync_len,
+ bool ndo_xmit)
+{
+ unsigned int index, status, estatus;
+ struct bufdesc *bdp;
+ dma_addr_t dma_addr;
+ int entries_free;
+ u16 frame_len;
+
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
+ return -EBUSY;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+ if (ndo_xmit) {
+ struct xdp_frame *xdpf = frame;
+
+ dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return -ENOMEM;
+
+ frame_len = xdpf->len;
+ txq->tx_buf[index].buf_p = xdpf;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+ } else {
+ struct xdp_buff *xdpb = frame;
+ struct page *page;
+
+ page = virt_to_page(xdpb->data);
+ dma_addr = page_pool_get_dma_addr(page) +
+ (xdpb->data - xdpb->data_hard_start);
+ dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+ dma_sync_len, DMA_BIDIRECTIONAL);
+ frame_len = xdpb->data_end - xdpb->data;
+ txq->tx_buf[index].buf_p = page;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+ }
+
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex)
+ estatus = BD_ENET_TX_INT;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+ bdp->cbd_datlen = cpu_to_fec16(frame_len);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+	/* Make sure the updates to the rest of the descriptor are performed
+	 * before transferring ownership.
+	 */
+ dma_wmb();
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
+ dma_wmb();
+
+ txq->bd.cur = bdp;
+
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
+ return 0;
+}
+
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len)
+{
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int queue, ret;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+ ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
+
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+ int num_frames,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ int cpu = smp_processor_id();
+ unsigned int sent_frames = 0;
+ struct netdev_queue *nq;
+ unsigned int queue;
+ int i;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+ for (i = 0; i < num_frames; i++) {
+ if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
+ break;
+ sent_frames++;
+ }
+
+ __netif_tx_unlock(nq);
+
+ return sent_frames;
+}
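
The return value follows the ndo_xdp_xmit contract: the driver reports how many frames it consumed, and the redirect core releases the remainder. Roughly what the caller does with it (a sketch of core behaviour, not driver code):

sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames, flags);
if (sent < 0)
	sent = 0;
for (i = sent; i < cnt; i++)
	xdp_return_frame_rx_napi(frames[i]);	/* recycle unsent frames */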
+
+static int fec_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!fep->bufdesc_ex)
+ return -EOPNOTSUPP;
+
+ fec_ptp_get(ndev, config);
+
+ return 0;
+}
+
+static int fec_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!fep->bufdesc_ex)
+ return -EOPNOTSUPP;
+
+ return fec_ptp_set(ndev, config, extack);
+}
+
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ + FEC_DRV_RESERVE_SPACE);
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+ fep->pagepool_order = order;
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ return 0;
+}
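
A worked example of the page-order computation, assuming 4 KiB pages and that FEC_DRV_RESERVE_SPACE (headroom plus tailroom) is on the order of a few hundred bytes (576 below is purely illustrative):

/* new_mtu = 9000:
 *   get_order(9000 + ETH_HLEN + ETH_FCS_LEN + 576) = get_order(9594) = 2
 * so RX buffers come from 16 KiB (order-2) page-pool pages, and
 *   rx_frame_size = (4096 << 2) - 576 = 15808
 */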
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -3442,11 +4050,13 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_eth_ioctl = fec_enet_ioctl,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
+ .ndo_change_mtu = fec_change_mtu,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = fec_set_features,
+ .ndo_bpf = fec_enet_bpf,
+ .ndo_xdp_xmit = fec_enet_xdp_xmit,
+ .ndo_hwtstamp_get = fec_hwtstamp_get,
+ .ndo_hwtstamp_set = fec_hwtstamp_set,
};
static const unsigned short offset_des_active_rxq[] = {
@@ -3475,12 +4085,14 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
- fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
+ fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
+ fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
/* Check mask of the streaming and coherent API */
ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
@@ -3496,8 +4108,8 @@ static int fec_enet_init(struct net_device *ndev)
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
/* Allocate memory for buffer descriptors. */
- cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
- GFP_KERNEL);
+ cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
+ GFP_KERNEL);
if (!cbd_base) {
ret = -ENOMEM;
goto free_queue_mem;
@@ -3508,9 +4120,6 @@ static int fec_enet_init(struct net_device *ndev)
if (ret)
goto free_queue_mem;
- /* make sure MAC we just acquired is programmed into the hw */
- fec_set_mac_address(ndev, NULL);
-
/* Set receive and transmit descriptor base. */
for (i = 0; i < fep->num_rx_queues; i++) {
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
@@ -3551,14 +4160,14 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+ netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -3566,13 +4175,15 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
- fep->rx_align = 0x3f;
- }
ndev->hw_features = ndev->features;
+ if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
+
fec_restart(ndev);
if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
@@ -3587,13 +4198,21 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
- int err, phy_reset;
- bool active_high = false;
+ struct gpio_desc *phy_reset;
int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node;
+ int err;
if (!np)
return 0;
@@ -3603,33 +4222,26 @@ static int fec_reset_phy(struct platform_device *pdev)
if (!err && msec > 1000)
msec = 1;
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (phy_reset == -EPROBE_DEFER)
- return phy_reset;
- else if (!gpio_is_valid(phy_reset))
- return 0;
-
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* valid reset duration should be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
- active_high = of_property_read_bool(np, "phy-reset-active-high");
+ phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
+ "failed to get phy-reset-gpios\n");
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
+ if (!phy_reset)
+ return 0;
if (msec > 20)
msleep(msec);
else
usleep_range(msec * 1000, msec * 1000 + 1000);
- gpio_set_value_cansleep(phy_reset, !active_high);
+ gpiod_set_value_cansleep(phy_reset, 0);
if (!phy_post_delay)
return 0;
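
With the gpiod API the reset polarity comes from the GPIO flags cell in the device tree rather than a separate "phy-reset-active-high" property; an illustrative binding fragment (values hypothetical):

/*
 *   phy-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 *
 * GPIOD_OUT_HIGH asserts the reset line at request time, and
 * gpiod_set_value_cansleep(phy_reset, 0) deasserts it, whatever the
 * electrical polarity.
 */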
@@ -3723,7 +4335,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
@@ -3751,14 +4363,13 @@ fec_probe(struct platform_device *pdev)
phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
- const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
int num_tx_qs;
int num_rx_qs;
char irq_name[8];
int irq_cnt;
- struct fec_devinfo *dev_info;
+ const struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3773,10 +4384,9 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
- of_id = of_match_device(fec_dt_ids, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
- dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ dev_info = device_get_match_data(&pdev->dev);
+ if (!dev_info)
+ dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
if (dev_info)
fep->quirks = dev_info->quirks;
@@ -3784,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
@@ -3809,7 +4417,11 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
- if (of_get_property(np, "fsl,magic-packet", NULL))
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
+ if (of_property_read_bool(np, "fsl,magic-packet"))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
ret = fec_enet_init_stop_mode(fep, np);
@@ -3858,17 +4470,21 @@ fec_probe(struct platform_device *pdev)
fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
/* enet_out is optional, depends on board */
- fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
- if (IS_ERR(fep->clk_enet_out))
- fep->clk_enet_out = NULL;
+ fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out)) {
+ ret = PTR_ERR(fep->clk_enet_out);
+ goto failed_clk;
+ }
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */
- fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
- if (IS_ERR(fep->clk_ref))
- fep->clk_ref = NULL;
+ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref)) {
+ ret = PTR_ERR(fep->clk_ref);
+ goto failed_clk;
+ }
fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
/* clk_2x_txclk is optional, depends on board */
@@ -3959,7 +4575,15 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
@@ -3971,10 +4595,8 @@ fec_probe(struct platform_device *pdev)
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
- fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -3983,6 +4605,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4002,6 +4625,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4010,7 +4634,7 @@ failed_ioremap:
return ret;
}
-static int
+static void
fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -4018,9 +4642,11 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev,
+ "Failed to resume device in remove callback (%pe)\n",
+ ERR_PTR(ret));
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -4033,19 +4659,25 @@ fec_drv_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
- clk_disable_unprepare(fep->clk_ahb);
- clk_disable_unprepare(fep->clk_ipg);
+	/* If pm_runtime_get_sync() failed, the clocks are still off, so skip
+	 * disabling them again.
+	 */
+ if (ret >= 0) {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ }
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
- return 0;
}
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4057,9 +4689,28 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
- fec_enet_clk_enable(ndev, false);
- if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4075,7 +4726,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4090,6 +4741,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
@@ -4097,6 +4751,10 @@ static int __maybe_unused fec_resume(struct device *dev)
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
@@ -4123,7 +4781,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4134,7 +4792,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4155,22 +4813,23 @@ failed_clk_ipg:
}
static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &fec_pm_ops,
+ .pm = pm_ptr(&fec_pm_ops),
.of_match_table = fec_dt_ids,
.suppress_bind_attrs = true,
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove = fec_drv_remove,
+ .remove = fec_drv_remove,
};
module_platform_driver(fec_driver);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,10 +29,12 @@
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -99,13 +101,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -617,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
@@ -893,13 +895,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
@@ -920,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
@@ -933,7 +937,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ if (of_property_read_bool(np, "fsl,7-wire-mode")) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -970,7 +974,7 @@ err_netdev:
return rv;
}
-static int
+static void
mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
@@ -994,8 +998,6 @@ mpc52xx_fec_remove(struct platform_device *op)
release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index b5497e308302..3d073f0fae63 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -13,9 +13,11 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include "fec_mpc52xx.h"
@@ -92,15 +94,14 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
goto out_free;
}
- snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start);
bus->priv = priv;
bus->parent = dev;
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
@@ -117,7 +118,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
return err;
}
-static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+static void mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
@@ -126,8 +127,6 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id mpc52xx_fec_mdio_match[] = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7e1924..4b7bad9a485d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,32 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -85,15 +83,41 @@
#define FEC_CC_MULT (1 << 31)
#define FEC_COUNTER_PERIOD (1 << 31)
#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
-#define FEC_CHANNLE_0 0
-#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+#define DEFAULT_PPS_CHANNEL 0
+
+#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
+#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
+
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * This function reads the cyclecounter registers; it is called through the
+ * cyclecounter structure to construct a ns counter from the arbitrary
+ * fixed-point registers.
+ */
+static u64 fec_ptp_read(struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
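
fec_ptp_read() is the cyclecounter's read hook: the timecounter layers a 64-bit nanosecond clock over the free-running 31-bit ATIME counter. A condensed sketch of what fec_ptp_start_cyclecounter() sets up (field values as used by the driver, shown for illustration):

fep->cc.read  = fec_ptp_read;
fep->cc.mask  = CLOCKSOURCE_MASK(31);
fep->cc.mult  = FEC_CC_MULT;		/* (1 << 31), defined above */
fep->cc.shift = 31;
timecounter_init(&fep->tc, &fep->cc, ktime_get_real_ns());

/* later, a raw counter sample becomes nanoseconds: */
ns = timecounter_cyc2time(&fep->tc, fec_ptp_read(&fep->cc));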
/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -101,15 +125,19 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
u32 val, tempval;
struct timespec64 ts;
u64 ns;
- val = 0;
- if (fep->pps_enable == enable)
- return 0;
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
- fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+ if (fep->perout_enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ dev_err(&fep->pdev->dev, "PEROUT is running");
+ return -EBUSY;
+ }
- spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->pps_enable == enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+ }
if (enable) {
/* clear capture or output compare interrupt status if any.
@@ -136,11 +164,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Adding the remaining nanoseconds
* to the current timer yields the next second boundary.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -154,7 +178,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to zero (for example 20 ns); since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
@@ -203,28 +227,71 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
return 0;
}
-/**
- * fec_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc: the cyclecounter structure
- *
- * this function reads the cyclecounter registers and is called by the
- * cyclecounter structure used to construct a ns counter from the
- * arbitrary fixed point registers
- */
-static u64 fec_ptp_read(const struct cyclecounter *cc)
+static int fec_ptp_pps_perout(struct fec_enet_private *fep)
{
- struct fec_enet_private *fep =
- container_of(cc, struct fec_enet_private, cc);
- u32 tempval;
+ u32 compare_val, ptp_hc, temp_val;
+ u64 curr_time;
+ unsigned long flags;
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
+ /* Update time counter */
+ timecounter_read(&fep->tc);
- return readl(fep->hwp + FEC_ATIME);
+ /* Get the current ptp hardware time counter */
+ ptp_hc = fec_ptp_read(&fep->cc);
+
+ /* Convert the ptp local counter to 1588 timestamp */
+ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+
+ /* If the PPS start time is less than the current time plus 100ms,
+ * just return: the software might not be able to write the
+ * comparison time into the FEC_TCCR register in time and would
+ * miss the start time.
+ */
+ if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ fep->perout_enable = false;
+ dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return -1;
+ }
+
+ compare_val = fep->perout_stime - curr_time + ptp_hc;
+ compare_val &= fep->cc.mask;
+
+ writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;
+
+ /* Enable compare event when overflow */
+ temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+ temp_val |= FEC_T_CTRL_PINPER;
+ writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
+ temp_val &= ~(FEC_T_TMODE_MASK);
+ temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
+ writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+ * the third timestamp. Refer the TCCR register detail in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
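
Every compare value above is folded through cc.mask because the counter is only 31 bits wide and wraps. A hypothetical helper (not in the driver) that captures the pattern:

	/* Advance a compare value by one period, modulo the 31-bit
	 * counter width (mask == 0x7fffffff on this hardware).
	 */
	static u32 fec_next_compare(u32 cmp, u32 period, u64 mask)
	{
		return (cmp + period) & (u32)mask;
	}
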
+
+static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
+{
+ struct fec_enet_private *fep = container_of(timer,
+ struct fec_enet_private, perout_timer);
+
+ fec_ptp_pps_perout(fep);
+
+ return HRTIMER_NORESTART;
}
/**
@@ -268,18 +335,21 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
}
/**
- * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * fec_ptp_adjfine - adjust ptp cycle frequency
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
* Adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * indicated amount from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*
* Because the ENET hardware frequency adjustment is complex,
* a software method is used instead.
*/
-static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
unsigned long flags;
int neg_adj = 0;
u32 i, tmp;
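
scaled_ppm carries a 16-bit binary fraction, so 1 ppm == 65536. The kernel helper used above is equivalent to ppb = scaled_ppm * 1000 / 2^16; a couple of worked values:

	/* scaled_ppm_to_ppb() reduces the fixed-point ppm to integer ppb:
	 *
	 *   scaled_ppm_to_ppb(65536) == 1000   (1 ppm   -> 1000 ppb)
	 *   scaled_ppm_to_ppb(32768) ==  500   (0.5 ppm ->  500 ppb)
	 *   scaled_ppm_to_ppb(1)     ==    0   (sub-ppb rounds down)
	 */
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
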
@@ -370,21 +440,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct fec_enet_private *adapter =
+ struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
- mutex_lock(&adapter->ptp_clk_mutex);
+ mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
- if (!adapter->ptp_clk_on) {
- mutex_unlock(&adapter->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
return -EINVAL;
}
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->tc);
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- mutex_unlock(&adapter->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -430,6 +500,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
+static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
+{
+ unsigned long flags;
+
+ hrtimer_cancel(&fep->perout_timer);
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ fep->perout_enable = false;
+ writel(0, fep->hwp + FEC_TCSR(channel));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
/**
* fec_ptp_enable
* @ptp: the ptp clock structure
@@ -442,42 +526,120 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
+ ktime_t timeout;
+ struct timespec64 start_time, period;
+ u64 curr_time, delta, period_ns;
+ unsigned long flags;
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
ret = fec_ptp_enable_pps(fep, on);
return ret;
- }
- return -EOPNOTSUPP;
-}
+ } else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ u32 reload_period;
-/**
- * fec_ptp_disable_hwts - disable hardware time stamping
- * @ndev: pointer to net_device
- */
-void fec_ptp_disable_hwts(struct net_device *ndev)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
- fep->hwts_tx_en = 0;
- fep->hwts_rx_en = 0;
-}
+ if (rq->perout.index != fep->pps_channel)
+ return -EOPNOTSUPP;
-int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
+ period.tv_sec = rq->perout.period.sec;
+ period.tv_nsec = rq->perout.period.nsec;
+ period_ns = timespec64_to_ns(&period);
- struct hwtstamp_config config;
+ /* The FEC PTP timer only has 31 bits, so periods longer than
+ * 4s are not supported.
+ */
+ if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
+ dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
+ return -EOPNOTSUPP;
+ }
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ reload_period = div_u64(period_ns, 2);
+ if (on && reload_period) {
+ u64 perout_stime;
+
+ /* Convert 1588 timestamp to ns */
+ start_time.tv_sec = rq->perout.start.sec;
+ start_time.tv_nsec = rq->perout.start.nsec;
+ perout_stime = timespec64_to_ns(&start_time);
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (fep->pps_enable) {
+ dev_err(&fep->pdev->dev, "PPS is running");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (fep->perout_enable) {
+ dev_err(&fep->pdev->dev,
+ "PEROUT has been enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Read current timestamp */
+ curr_time = timecounter_read(&fep->tc);
+ if (perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev,
+ "Start time must be greater than current time\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Calculate time difference */
+ delta = perout_stime - curr_time;
+ fep->reload_period = reload_period;
+ fep->perout_stime = perout_stime;
+ fep->perout_enable = true;
+
+unlock:
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ if (ret)
+ return ret;
+
+ /* Because the FEC timer counter is only 31 bits wide, only the low
+ * 31 bits of the time comparison register FEC_TCCR can be set. If
+ * the start time of the PPS signal exceeds the current time by more
+ * than 0x80000000 ns, a software timer is used; it expires about
+ * 1 second before the start time so FEC_TCCR can still be set.
+ */
+ if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
+ timeout = ns_to_ktime(delta - NSEC_PER_SEC);
+ hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
+ } else {
+ return fec_ptp_pps_perout(fep);
+ }
+ } else {
+ fec_ptp_pps_disable(fep, fep->pps_channel);
+ }
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
+ return 0;
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
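
From userspace, the PEROUT path above is exercised through the PTP character device. A hedged sketch (assuming /dev/ptp0 is this FEC instance and that the PHC roughly tracks system time; error handling trimmed):

	#include <fcntl.h>
	#include <time.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int request_perout(void)
	{
		struct ptp_perout_request req = { 0 };
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0)
			return -1;

		req.index = 0;			/* must match fsl,pps-channel */
		req.flags = 0;			/* the driver rejects any flags */
		req.start.sec = time(NULL) + 2;	/* must lie in the future */
		req.period.sec = 1;		/* at most 4s on this hardware */

		return ioctl(fd, PTP_PEROUT_REQUEST, &req);
	}
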
- switch (config.tx_type) {
+int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
break;
@@ -488,33 +650,28 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
fep->hwts_rx_en = 0;
break;
default:
fep->hwts_rx_en = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (fep->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ config->flags = 0;
+ config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
}
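
With the kernel_hwtstamp_config conversion, these helpers are no longer reached through a SIOCSHWTSTAMP ioctl copy; the core calls them via the ndo_hwtstamp_get/set net_device_ops. A hedged sketch of the wiring (the real hookup lives in fec_main.c, outside this diff; fec_hwtstamp_get is an illustrative wrapper since fec_ptp_get() returns void):

	static int fec_hwtstamp_get(struct net_device *ndev,
				    struct kernel_hwtstamp_config *config)
	{
		fec_ptp_get(ndev, config);
		return 0;
	}

	/* sketch only; a real net_device_ops carries many more callbacks */
	static const struct net_device_ops fec_netdev_ops_sketch = {
		.ndo_hwtstamp_get	= fec_hwtstamp_get,
		.ndo_hwtstamp_set	= fec_ptp_set,
	};
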
/*
@@ -561,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
+ if (fep->pps_enable) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ }
+
return IRQ_HANDLED;
}
@@ -583,19 +743,23 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *np = fep->pdev->dev.of_node;
int irq;
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ of_property_read_u32(np, "fsl,pps-channel", &fep->pps_channel);
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
- fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_per_out = 1;
fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 1;
- fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjfine = fec_ptp_adjfine;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
fep->ptp_caps.gettime64 = fec_ptp_gettime;
fep->ptp_caps.settime64 = fec_ptp_settime;
@@ -614,6 +778,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
+
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
irq = platform_get_irq_optional(pdev, irq_idx);
@@ -637,12 +804,66 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
schedule_delayed_work(&fep->time_keep, HZ);
}
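
of_property_read_u32() leaves the output argument untouched on any failure, so presetting the default (as fec_ptp_init() does above) makes the "fsl,pps-channel" property purely optional. An equivalent, more explicit form of the same pattern:

	u32 chan = DEFAULT_PPS_CHANNEL;

	if (of_property_read_u32(np, "fsl,pps-channel", &chan))
		dev_dbg(&pdev->dev, "no fsl,pps-channel, using %u\n", chan);
	fep->pps_channel = chan;
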
+void fec_ptp_save_state(struct fec_enet_private *fep)
+{
+ unsigned long flags;
+ u32 atime_inc_corr;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ fep->ptp_saved_state.pps_enable = fep->pps_enable;
+
+ fep->ptp_saved_state.ns_phc = timecounter_read(&fep->tc);
+ fep->ptp_saved_state.ns_sys = ktime_get_ns();
+
+ fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
+ atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
+ fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/* Restore PTP functionality after a reset */
+void fec_ptp_restore_state(struct fec_enet_private *fep)
+{
+ u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ unsigned long flags;
+ u32 counter;
+ u64 ns;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* Reset turned it off, so adjust our status flag */
+ fep->pps_enable = 0;
+
+ writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
+ atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
+ writel(atime_inc, fep->hwp + FEC_ATIME_INC);
+
+ ns = ktime_get_ns() - fep->ptp_saved_state.ns_sys + fep->ptp_saved_state.ns_phc;
+ counter = ns & fep->cc.mask;
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ /* Restart PPS if needed */
+ if (fep->ptp_saved_state.pps_enable) {
+ /* Re-enable PPS */
+ fec_ptp_enable_pps(fep, 1);
+ }
+}
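
The save/restore pair bridges the reset with the system clock: the PHC is assumed to have advanced exactly as much as the system clock did while the MAC was held in reset. In equation form (a comment-only sketch):

	/* save:     ns_phc0 = timecounter_read(&tc);  ns_sys0 = ktime_get_ns();
	 * restore:  ns_phc  = ktime_get_ns() - ns_sys0 + ns_phc0;
	 *
	 * i.e. restored PHC time = saved PHC time + elapsed system time.
	 */
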
+
void fec_ptp_stop(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
+ hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 48bf8088795d..a55542c1ad65 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -3,7 +3,8 @@ config FSL_FMAN
tristate "FMan support"
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
- select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
select CRC32
default n
help
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ce0a121580f6..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fsl/guts.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -50,7 +24,6 @@
/* General defines */
#define FMAN_LIODN_TBL 64 /* size of LIODN table */
-#define MAX_NUM_OF_MACS 10
#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4
#define BASE_RX_PORTID 0x08
#define BASE_TX_PORTID 0x28
@@ -2717,17 +2690,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
- return NULL;
+ return ERR_PTR(-ENOMEM);
fm_node = of_node_get(of_dev->dev.of_node);
@@ -2740,36 +2712,20 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.id = (u8)val;
/* Get the FM interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 0);
+ if (err < 0)
goto fman_node_put;
- }
- irq = res->start;
+ irq = err;
/* Get the FM error interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
- __func__);
- goto fman_node_put;
- }
- fman->dts_params.err_irq = res->start;
-
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 1);
+ if (err < 0)
goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
+ fman->dts_params.err_irq = err;
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2777,6 +2733,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2797,6 +2754,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the MURAM base address and size */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
__func__);
goto fman_free;
@@ -2832,22 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
@@ -2868,7 +2820,7 @@ fman_node_put:
of_node_put(fm_node);
fman_free:
kfree(fman);
- return NULL;
+ return ERR_PTR(err);
}
static int fman_probe(struct platform_device *of_dev)
@@ -2880,8 +2832,8 @@ static int fman_probe(struct platform_device *of_dev)
dev = &of_dev->dev;
fman = read_dts_node(of_dev);
- if (!fman)
- return -EIO;
+ if (IS_ERR(fman))
+ return PTR_ERR(fman);
err = fman_config(fman);
if (err) {
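
Two modernizations run through read_dts_node(): IRQs now come from platform_get_irq(), which logs its own errors, and the request_mem_region()+ioremap() pair collapses into devm_platform_get_and_ioremap_resource(), with failures propagated as ERR_PTR() values instead of a bare NULL. A minimal sketch of the combined pattern (hypothetical helper name):

	static void __iomem *fman_map_example(struct platform_device *pdev,
					      struct resource **res)
	{
		void __iomem *base;
		int irq;

		irq = platform_get_irq(pdev, 0);	/* logs on failure */
		if (irq < 0)
			return ERR_PTR(irq);		/* incl. -EPROBE_DEFER */

		base = devm_platform_get_and_ioremap_resource(pdev, 0, res);
		return base;				/* valid or ERR_PTR() */
	}
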
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..74eb62eba0d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
@@ -101,6 +74,9 @@
#define BM_MAX_NUM_OF_POOLS 64 /* Buffers pools */
#define FMAN_PORT_MAX_EXT_POOLS_NUM 8 /* External BM pools per Rx port */
+/* General defines */
+#define MAX_NUM_OF_MACS 10
+
struct fman; /* FMan data */
/* Enum for defining port types */
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -43,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>
+#include <linux/netdevice.h>
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -55,9 +30,6 @@
#define TBICON_CLK_SELECT 0x0020 /* Clock select */
#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
-#define TBIANA_SGMII 0x4001
-#define TBIANA_1000X 0x01a0
-
/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
#define DTSEC_IMASK_RXCEN 0x40000000
@@ -118,9 +90,10 @@
#define DTSEC_ECNTRL_GMIIM 0x00000040
#define DTSEC_ECNTRL_TBIM 0x00000020
-#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_RPM 0x00000010
#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_RMM 0x00000004
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
#define TCTRL_TTSE 0x00000040
@@ -327,7 +300,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -344,7 +317,8 @@ struct fman_mac {
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
- struct phy_device *tbiphy;
+ struct mdio_device *tbidev;
+ struct phylink_pcs pcs;
};
static void set_dflts(struct dtsec_cfg *cfg)
@@ -382,56 +356,14 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
- bool is_rgmii, is_sgmii, is_qsgmii;
enet_addr_t eth_addr;
- u32 tmp;
+ u32 tmp = 0;
int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
iowrite32be(0, &regs->maccfg1);
- /* dtsec_id2 */
- tmp = ioread32be(&regs->tsec_id2);
-
- /* check RGMII support */
- if (iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID ||
- iface == PHY_INTERFACE_MODE_RMII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- if (iface == PHY_INTERFACE_MODE_SGMII ||
- iface == PHY_INTERFACE_MODE_MII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID;
- is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
- is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
-
- tmp = 0;
- if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
- tmp |= DTSEC_ECNTRL_GMIIM;
- if (is_sgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
- if (is_qsgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
- DTSEC_ECNTRL_QSGMIIM);
- if (is_rgmii)
- tmp |= DTSEC_ECNTRL_RPM;
- if (iface_speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
-
- iowrite32be(tmp, &regs->ecntrl);
-
- tmp = 0;
-
if (cfg->tx_pause_time)
tmp |= cfg->tx_pause_time;
if (cfg->tx_pause_time_extd)
@@ -472,17 +404,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp = 0;
- if (iface_speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (iface_speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
-
tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
MACCFG2_PREAMBLE_LENGTH_MASK;
if (cfg->tx_pad_crc)
tmp |= MACCFG2_PAD_CRC_EN;
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
iowrite32be(tmp, &regs->maccfg2);
tmp = (((cfg->non_back_to_back_ipg1 <<
@@ -551,10 +476,6 @@ static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
static int check_init_parameters(struct fman_mac *dtsec)
{
- if (dtsec->max_speed >= SPEED_10000) {
- pr_err("1G MAC driver supports 1G or lower speeds\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -656,22 +577,10 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
return bit_mask;
}
-static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
-{
- /* Checks if dTSEC driver parameters were initialized */
- if (!dtsec_drv_params)
- return true;
-
- return false;
-}
-
static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (is_init_done(dtsec->dtsec_drv_param))
- return 0;
-
return (u16)ioread32be(&regs->maxfrm);
}
@@ -708,6 +617,7 @@ static void dtsec_isr(void *handle)
dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
if (event & DTSEC_IMASK_XFUNEN) {
/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ /* FIXME: This races with the rest of the driver! */
if (dtsec->fm_rev_info.major == 2) {
u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
/* a. Write 0x00E0_0C00 to DTSEC_ID
@@ -840,135 +750,98 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ return container_of(pcs, struct fman_mac, pcs);
+}
- dtsec->dtsec_drv_param->maximum_frame = new_val;
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- return 0;
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+ return phylink_mii_c22_pcs_config(dtsec->tbidev, interface,
+ advertising, neg_mode);
+}
- return 0;
+static void dtsec_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
+
+ phylink_mii_c22_pcs_an_restart(dtsec->tbidev);
}
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static const struct phylink_pcs_ops dtsec_pcs_ops = {
+ .pcs_get_state = dtsec_pcs_get_state,
+ .pcs_config = dtsec_pcs_config,
+ .pcs_an_restart = dtsec_pcs_an_restart,
+};
+
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- /* Enable */
- tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
- iowrite32be(tmp, &regs->maccfg1);
-
- /* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- /* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
-
- tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
- iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
@@ -989,27 +862,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
tmp = ioread32be(&regs->maccfg1);
if (en)
tmp |= MACCFG1_RX_FLOW;
@@ -1017,25 +877,125 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &dtsec->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static void dtsec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ tmp = DTSEC_ECNTRL_RMM;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
+ break;
+ default:
+ dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
+ phy_modes(state->interface));
+ return;
+ }
+
+ iowrite32be(tmp, &regs->ecntrl);
+}
+
+static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *dtsec = mac_dev->fman_mac;
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0);
+ dtsec_accept_rx_pause_frames(dtsec, rx_pause);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
+ tmp = ioread32be(&regs->maccfg2);
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX);
+ if (speed >= SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ else
+ tmp |= MACCFG2_NIBBLE_MODE;
+
+ if (duplex == DUPLEX_FULL)
+ tmp |= MACCFG2_FULL_DUPLEX;
- graceful_stop(dtsec, mode);
+ iowrite32be(tmp, &regs->maccfg2);
+
+ mac_dev->update_speed(mac_dev, speed);
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful Rx/Tx stop bit */
+ graceful_start(dtsec);
+}
+
+static void dtsec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ /* Graceful stop - Assert the graceful Rx/Tx stop bit */
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg1);
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ iowrite32be(tmp, &regs->maccfg1);
+}
+
+static const struct phylink_mac_ops dtsec_mac_ops = {
+ .mac_select_pcs = dtsec_select_pcs,
+ .mac_config = dtsec_mac_config,
+ .mac_link_up = dtsec_link_up,
+ .mac_link_down = dtsec_link_down,
+};
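
These mac ops are consumed by phylink rather than called directly; the FMan mac/dpaa glue (outside this diff) hands them over at netdev setup time. An illustrative sketch, assuming mac_dev carries the phylink_config populated later in this patch:

	struct phylink *pl;

	pl = phylink_create(&mac_dev->phylink_config,
			    dev_fwnode(mac_dev->dev),
			    mac_dev->phy_if, &dtsec_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);
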
+
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
+{
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
@@ -1043,12 +1003,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1057,9 +1018,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1114,14 +1072,11 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->rctrl);
if (enable)
tmp |= RCTRL_MPROM;
@@ -1133,14 +1088,11 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
rctrl = ioread32be(&regs->rctrl);
tctrl = ioread32be(&regs->tctrl);
@@ -1158,7 +1110,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1168,9 +1121,6 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1229,14 +1179,11 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
/* Set unicast promiscuous */
tmp = ioread32be(&regs->rctrl);
if (new_val)
@@ -1258,85 +1205,12 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
-{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
- tmp = ioread32be(&regs->maccfg2);
-
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
-
- tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
- if (speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
- iowrite32be(tmp, &regs->maccfg2);
-
- tmp = ioread32be(&regs->ecntrl);
- if (speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
- else
- tmp &= ~DTSEC_ECNTRL_R100M;
- iowrite32be(tmp, &regs->ecntrl);
-
- graceful_start(dtsec, mode);
-
- return 0;
-}
-
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
-{
- u16 tmp_reg16;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
-
- tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- return 0;
-}
-
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
-{
- struct dtsec_regs __iomem *regs = dtsec->regs;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- *mac_version = ioread32be(&regs->tsec_id);
-
- return 0;
-}
-
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
bit_mask = get_exception_flag(exception);
if (bit_mask) {
@@ -1382,16 +1256,13 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- u16 max_frm_ln;
+ u16 max_frm_ln, tbicon;
int err;
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -1406,38 +1277,19 @@ int dtsec_init(struct fman_mac *dtsec)
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
dtsec->max_speed, dtsec->addr, dtsec->exceptions,
- dtsec->tbiphy->mdio.addr);
+ dtsec->tbidev->addr);
if (err) {
free_init_resources(dtsec);
pr_err("DTSEC version doesn't support this i/f mode\n");
return err;
}
- if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
- u16 tmp_reg16;
-
- /* Configure the TBI PHY Control Register */
- tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+ /* Configure the TBI PHY Control Register */
+ tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
- tmp_reg16 = TBICON_CLK_SELECT;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
-
- tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
- BMCR_FULLDPLX | BMCR_SPEED1000);
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- if (dtsec->basex_if)
- tmp_reg16 = TBIANA_1000X;
- else
- tmp_reg16 = TBIANA_SGMII;
- phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
-
- tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
- }
+ tbicon = TBICON_CLK_SELECT;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
/* Max Frame Length */
max_frm_ln = (u16)ioread32be(&regs->maxfrm);
@@ -1476,24 +1328,24 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
kfree(dtsec->dtsec_drv_param);
dtsec->dtsec_drv_param = NULL;
+ if (!IS_ERR_OR_NULL(dtsec->tbidev))
+ put_device(&dtsec->tbidev->dev);
kfree(dtsec);
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1362,9 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
- dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1381,122 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
- dtsec->basex_if = params->basex_if;
-
- if (!params->internal_phy_node) {
- pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
- }
-
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
- if (!dtsec->tbiphy) {
- pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
- }
-
- put_device(&dtsec->tbiphy->mdio.dev);
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
return dtsec;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
err_dtsec:
kfree(dtsec);
return NULL;
}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+ unsigned long capabilities;
+ unsigned long *supported;
+
+ mac_dev->phylink_ops = &dtsec_mac_ops;
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node || !of_device_is_available(phy_node)) {
+ of_node_put(phy_node);
+ err = -EINVAL;
+ dev_err_probe(mac_dev->dev, err,
+ "TBI PCS node is not available\n");
+ goto _return_fm_mac_free;
+ }
+
+ dtsec->tbidev = of_mdio_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!dtsec->tbidev) {
+ err = -EPROBE_DEFER;
+ dev_err_probe(mac_dev->dev, err,
+ "could not find mdiodev for PCS\n");
+ goto _return_fm_mac_free;
+ }
+ dtsec->pcs.ops = &dtsec_pcs_ops;
+ dtsec->pcs.poll = true;
+
+ supported = mac_dev->phylink_config.supported_interfaces;
+
+ /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are
+ * supported? If not, we can determine support via the phy if SerDes
+ * support is added.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ }
+
+ if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
+ phy_interface_set_rgmii(supported);
+
+ /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports
+ * RMII and RGMII. However, the only SoCs which support RMII
+ * are the P1017 and P1023. Avoid advertising this mode on
+ * other SoCs. This is a bit of a moot point, since there's no
+ * in-tree support for ethernet on these platforms...
+ */
+ if (of_machine_is_compatible("fsl,P1023") ||
+ of_machine_is_compatible("fsl,P1023RDB"))
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ mac_dev->phylink_config.mac_capabilities = capabilities;
+
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
+}
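
A note on the error reporting above: dev_err_probe() returns the error it is given and stays quiet for -EPROBE_DEFER (recording the reason in /sys/kernel/debug/devices_deferred instead), so one call site covers both the defer and the hard-failure case. A compact form of the PCS lookup failure path, ignoring the driver's goto-based cleanup:

	if (!dtsec->tbidev)
		return dev_err_probe(mac_dev->dev, -EPROBE_DEFER,
				     "could not find mdiodev for PCS\n");
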
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..e5d6cddea731 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include <linux/if_ether.h>
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,40 +159,21 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
- /* Note that the speed should indicate the maximum rate that
- * this MAC should support rather than the actual speed;
- */
- u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
- /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
- * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
- * to interface between MAC and phy/backplane, SGMII phy can still
- * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
- */
- bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
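
Why the cookie in fman_mac_exception_cb changes from void * to struct mac_device * above: a forward declaration is enough for the compiler to type-check registered handlers, so the cast that every handler previously performed disappears. A small illustrative sketch (names are not from the driver):

	struct mac_device;	/* forward declaration suffices */

	/* old shape: untyped cookie, every handler casts */
	typedef void (untyped_cb)(void *dev_id, int exception);

	/* new shape: typed cookie, mismatched handlers fail to compile */
	typedef void (typed_cb)(struct mac_device *dev_id, int exception);

	static void example_handler(struct mac_device *dev_id, int exception)
	{
		(void)dev_id;		/* already the right type, no cast */
		(void)exception;
	}

	static typed_cb *example_registered = example_handler;
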
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..c84f0336c94c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,78 +1,22 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <linux/phy/phy.h>
#include <linux/of_mdio.h>
-/* PCS registers */
-#define MDIO_SGMII_CR 0x00
-#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
-#define MDIO_SGMII_LINK_TMR_L 0x12
-#define MDIO_SGMII_LINK_TMR_H 0x13
-#define MDIO_SGMII_IF_MODE 0x14
-
-/* SGMII Control defines */
-#define SGMII_CR_AN_EN 0x1000
-#define SGMII_CR_RESTART_AN 0x0200
-#define SGMII_CR_FD 0x0100
-#define SGMII_CR_SPEED_SEL1_1G 0x0040
-#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
- SGMII_CR_SPEED_SEL1_1G)
-
-/* SGMII Device Ability for SGMII defines */
-#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
-#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
-
-/* Link timer define */
-#define LINK_TMR_L 0xa120
-#define LINK_TMR_H 0x0007
-#define LINK_TMR_L_BASEX 0xaf08
-#define LINK_TMR_H_BASEX 0x002f
-
-/* SGMII IF Mode defines */
-#define IF_MODE_USE_SGMII_AN 0x0002
-#define IF_MODE_SGMII_EN 0x0001
-#define IF_MODE_SGMII_SPEED_100M 0x0004
-#define IF_MODE_SGMII_SPEED_1G 0x0008
-#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
-
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7
@@ -323,7 +267,6 @@ struct memac_cfg {
bool reset_on_init;
bool pause_ignore;
bool promiscuous_mode_enable;
- struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
u32 tx_ipg_length;
@@ -334,10 +277,7 @@ struct fman_mac {
struct memac_regs __iomem *regs;
/* MAC address of device */
u64 addr;
- /* Ethernet physical interface */
- phy_interface_t phy_if;
- u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -349,9 +289,12 @@ struct fman_mac {
struct memac_cfg *memac_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
- bool basex_if;
- struct phy_device *pcsphy;
+ struct phy *serdes;
+ struct phylink_pcs *sgmii_pcs;
+ struct phylink_pcs *qsgmii_pcs;
+ struct phylink_pcs *xfi_pcs;
bool allmulti_enabled;
+ bool rgmii_no_half_duplex;
};
static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
@@ -409,7 +352,6 @@ static void set_exception(struct memac_regs __iomem *regs, u32 val,
}
static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
- phy_interface_t phy_if, u16 speed, bool slow_10g_if,
u32 exceptions)
{
u32 tmp;
@@ -437,41 +379,6 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
iowrite32be((u32)0, &regs->pause_thresh[0]);
- /* IF_MODE */
- tmp = 0;
- switch (phy_if) {
- case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_10G;
- break;
- case PHY_INTERFACE_MODE_MII:
- tmp |= IF_MODE_MII;
- break;
- default:
- tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII ||
- phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
- tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
- }
- iowrite32be(tmp, &regs->if_mode);
-
- /* TX_FIFO_SECTIONS */
- tmp = 0;
- if (phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (slow_10g_if) {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- }
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
- }
- iowrite32be(tmp, &regs->tx_fifo_sections);
-
/* clear all pending events and set-up interrupts */
iowrite32be(0xffffffff, &regs->ievent);
set_exception(regs, exceptions, true);
@@ -511,93 +418,6 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
return xor_val;
}
-static void setup_sgmii_internal_phy(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
-{
- u16 tmp_reg16;
-
- if (WARN_ON(!memac->pcsphy))
- return;
-
- /* SGMII mode */
- tmp_reg16 = IF_MODE_SGMII_EN;
- if (!fixed_link)
- /* AN enable */
- tmp_reg16 |= IF_MODE_USE_SGMII_AN;
- else {
- switch (fixed_link->speed) {
- case 10:
- /* For 10M: IF_MODE[SPEED_10M] = 0 */
- break;
- case 100:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
- break;
- case 1000:
- default:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
- break;
- }
- if (!fixed_link->duplex)
- tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
- }
- phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
-
- /* Device ability according to SGMII specification */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * According to Cisco SGMII specification the timer should be 1.6 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
-
- if (!fixed_link)
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- else
- /* AN disabled */
- tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
-static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
-{
- u16 tmp_reg16;
-
- /* AN Device capability */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
-
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
static int check_init_parameters(struct fman_mac *memac)
{
if (!memac->exception_cb) {
@@ -703,220 +523,284 @@ static void free_init_resources(struct fman_mac *memac)
memac->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct memac_cfg *memac_drv_params)
+static int memac_enable(struct fman_mac *memac)
{
- /* Checks if mEMAC driver parameters were initialized */
- if (!memac_drv_params)
- return true;
+ int ret;
+
+ ret = phy_init(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not initialize serdes: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = phy_power_on(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not power on serdes: %pe\n", ERR_PTR(ret));
+ phy_exit(memac->serdes);
+ }
+
+ return ret;
+}
- return false;
+static void memac_disable(struct fman_mac *memac)
+{
+ phy_power_off(memac->serdes);
+ phy_exit(memac->serdes);
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ tmp = ioread32be(&regs->tx_fifo_sections);
+
+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+ iowrite32be(tmp, &regs->tx_fifo_sections);
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~CMD_CFG_PFC_MODE;
iowrite32be(tmp, &regs->command_config);
+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+ else
+ tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+ tmp |= ((u32)pause_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+ else
+ tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+ tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (new_val)
- tmp |= CMD_CFG_PROMIS_EN;
+ if (en)
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
else
- tmp &= ~CMD_CFG_PROMIS_EN;
+ tmp |= CMD_CFG_PAUSE_IGNORE;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static unsigned long memac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
-
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- tmp = ioread32be(&regs->if_mode);
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+ unsigned long caps = config->mac_capabilities;
- /* Set full duplex */
- tmp &= ~IF_MODE_HD;
+ if (phy_interface_mode_is_rgmii(interface) &&
+ memac->rgmii_no_half_duplex)
+ caps &= ~(MAC_10HD | MAC_100HD);
- if (phy_interface_mode_is_rgmii(memac->phy_if)) {
- /* Configure RGMII in manual mode */
- tmp &= ~IF_MODE_RGMII_AUTO;
- tmp &= ~IF_MODE_RGMII_SP_MASK;
- /* Full duplex */
- tmp |= IF_MODE_RGMII_FD;
+ return caps;
+}
- switch (speed) {
- case SPEED_1000:
- tmp |= IF_MODE_RGMII_1000;
- break;
- case SPEED_100:
- tmp |= IF_MODE_RGMII_100;
- break;
- case SPEED_10:
- tmp |= IF_MODE_RGMII_10;
- break;
- default:
- break;
- }
+/**
+ * memac_if_mode() - Convert an interface mode into an IF_MODE config
+ * @interface: A phy interface mode
+ *
+ * Return: A configuration word, suitable for programming into the lower bits
+ * of %IF_MODE.
+ */
+static u32 memac_if_mode(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return IF_MODE_MII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return IF_MODE_GMII | IF_MODE_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return IF_MODE_GMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return IF_MODE_10G;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
}
-
- iowrite32be(tmp, &regs->if_mode);
-
- return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->max_frame_length = new_val;
-
- return 0;
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return memac->sgmii_pcs;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return memac->qsgmii_pcs;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return memac->xfi_pcs;
+ default:
+ return NULL;
+ }
}
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
+static int memac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
+ iface);
+ default:
+ return 0;
+ }
}
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
+static void memac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->fixed_link = fixed_link;
-
- return 0;
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct memac_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp = ioread32be(&regs->if_mode);
+
+ tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII);
+ tmp |= memac_if_mode(state->interface);
+ if (phylink_autoneg_inband(mode))
+ tmp |= IF_MODE_RGMII_AUTO;
+ iowrite32be(tmp, &regs->if_mode);
}
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static void memac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *memac = mac_dev->fman_mac;
struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
-
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ u32 tmp = memac_if_mode(interface);
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
- tmp = ioread32be(&regs->tx_fifo_sections);
+ memac_set_tx_pause_frames(memac, 0, pause_time, 0);
+ memac_accept_rx_pause_frames(memac, rx_pause);
- GET_TX_EMPTY_DEFAULT_VALUE(tmp);
- iowrite32be(tmp, &regs->tx_fifo_sections);
+ if (duplex == DUPLEX_HALF)
+ tmp |= IF_MODE_HD;
- tmp = ioread32be(&regs->command_config);
- tmp &= ~CMD_CFG_PFC_MODE;
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ }
+ iowrite32be(tmp, &regs->if_mode);
- iowrite32be(tmp, &regs->command_config);
+ /* TODO: EEE? */
- tmp = ioread32be(&regs->pause_quanta[priority / 2]);
- if (priority % 2)
- tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
- else
- tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
- tmp |= ((u32)pause_time << (16 * (priority % 2)));
- iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+ if (speed == SPEED_10000) {
+ if (memac->fm_rev_info.major == 6 &&
+ memac->fm_rev_info.minor == 4)
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G;
+ else
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G;
+ tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G;
+ } else {
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G;
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
- tmp = ioread32be(&regs->pause_thresh[priority / 2]);
- if (priority % 2)
- tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
- else
- tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
- tmp |= ((u32)thresh_time << (16 * (priority % 2)));
- iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+ mac_dev->update_speed(mac_dev, speed);
- return 0;
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static void memac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
+ /* TODO: graceful */
tmp = ioread32be(&regs->command_config);
- if (en)
- tmp &= ~CMD_CFG_PAUSE_IGNORE;
- else
- tmp |= CMD_CFG_PAUSE_IGNORE;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
-{
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+static const struct phylink_mac_ops memac_mac_ops = {
+ .mac_get_caps = memac_get_caps,
+ .mac_select_pcs = memac_select_pcs,
+ .mac_prepare = memac_prepare,
+ .mac_config = memac_mac_config,
+ .mac_link_up = memac_link_up,
+ .mac_link_down = memac_link_down,
+};
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
+{
add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -940,14 +824,11 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
iowrite32be(entry | HASH_CTRL_MCAST_EN,
@@ -963,12 +844,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -976,9 +858,6 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
@@ -1001,14 +880,11 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -1024,28 +900,102 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static u64 memac_read64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
+ do {
+ high = ioread32be(reg + 4);
+ low = ioread32be(reg);
+ tmp = ioread32be(reg + 4);
+ } while (high != tmp);
+
+ return ((u64)high << 32) | low;
+}
+
+static void memac_get_pause_stats(struct fman_mac *memac,
+ struct ethtool_pause_stats *s)
+{
+ s->tx_pause_frames = memac_read64(&memac->regs->txpf_l);
+ s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l);
+}
+
+static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 9600 },
+ {},
+};
+
+static void memac_get_rmon_stats(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = memac_read64(&memac->regs->rund_l);
+ s->oversize_pkts = memac_read64(&memac->regs->rovr_l);
+ s->fragments = memac_read64(&memac->regs->rfrg_l);
+ s->jabbers = memac_read64(&memac->regs->rjbr_l);
+
+ s->hist[0] = memac_read64(&memac->regs->r64_l);
+ s->hist[1] = memac_read64(&memac->regs->r127_l);
+ s->hist[2] = memac_read64(&memac->regs->r255_l);
+ s->hist[3] = memac_read64(&memac->regs->r511_l);
+ s->hist[4] = memac_read64(&memac->regs->r1023_l);
+ s->hist[5] = memac_read64(&memac->regs->r1518_l);
+ s->hist[6] = memac_read64(&memac->regs->r1519x_l);
+
+ s->hist_tx[0] = memac_read64(&memac->regs->t64_l);
+ s->hist_tx[1] = memac_read64(&memac->regs->t127_l);
+ s->hist_tx[2] = memac_read64(&memac->regs->t255_l);
+ s->hist_tx[3] = memac_read64(&memac->regs->t511_l);
+ s->hist_tx[4] = memac_read64(&memac->regs->t1023_l);
+ s->hist_tx[5] = memac_read64(&memac->regs->t1518_l);
+ s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l);
+
+ *ranges = memac_rmon_ranges;
+}
+
+static void memac_get_eth_ctrl_stats(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l);
+ s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l);
+}
+
+static void memac_get_eth_mac_stats(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l);
+ s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l);
+ s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l);
+ s->AlignmentErrors = memac_read64(&memac->regs->raln_l);
+ s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l);
+ s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l);
+ s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l);
+ s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l);
+ s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l);
+ s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l);
+ s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l);
+ s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l);
+}
+
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
- u8 i;
enet_addr_t eth_addr;
- bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
int err;
u32 reg32 = 0;
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
err = check_init_parameters(memac);
if (err)
return err;
memac_drv_param = memac->memac_drv_param;
- if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
- slow_10g_if = true;
-
/* First, reset the MAC if desired. */
if (memac_drv_param->reset_on_init) {
err = reset(memac->regs);
@@ -1061,10 +1011,7 @@ int memac_init(struct fman_mac *memac)
add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
- fixed_link = memac_drv_param->fixed_link;
-
- init(memac->regs, memac->memac_drv_param, memac->phy_if,
- memac->max_speed, slow_10g_if, memac->exceptions);
+ init(memac->regs, memac->memac_drv_param, memac->exceptions);
/* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
* Exists only in FMan 6.0 and 6.3.
@@ -1080,33 +1027,6 @@ int memac_init(struct fman_mac *memac)
iowrite32be(reg32, &memac->regs->command_config);
}
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
- /* Configure internal SGMII PHY */
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
- } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- /* Configure 4 internal SGMII PHYs */
- for (i = 0; i < 4; i++) {
- u8 qsmgii_phy_addr, phy_addr;
- /* QSGMII PHY address occupies 3 upper bits of 5-bit
- * phy_address; the lower 2 bits are used to extend
- * register address space and access each one of 4
- * ports inside QSGMII.
- */
- phy_addr = memac->pcsphy->mdio.addr;
- qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
- memac->pcsphy->mdio.addr = qsmgii_phy_addr;
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
-
- memac->pcsphy->mdio.addr = phy_addr;
- }
- }
-
/* Max Frame Length */
err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
memac_drv_param->max_frame_length);
@@ -1135,32 +1055,36 @@ int memac_init(struct fman_mac *memac)
fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
- kfree(memac_drv_param);
- memac->memac_drv_param = NULL;
-
return 0;
}
-int memac_free(struct fman_mac *memac)
+static void pcs_put(struct phylink_pcs *pcs)
{
- free_init_resources(memac);
+ if (IS_ERR_OR_NULL(pcs))
+ return;
+
+ lynx_pcs_destroy(pcs);
+}
- if (memac->pcsphy)
- put_device(&memac->pcsphy->mdio.dev);
+static int memac_free(struct fman_mac *memac)
+{
+ free_init_resources(memac);
+ pcs_put(memac->sgmii_pcs);
+ pcs_put(memac->qsgmii_pcs);
+ pcs_put(memac->xfi_pcs);
kfree(memac->memac_drv_param);
kfree(memac);
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1102,234 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
- memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->regs = mac_dev->vaddr;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
- memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
- memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
- pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ return memac;
+}
+
+static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
+ int index)
+{
+ struct device_node *node;
+ struct phylink_pcs *pcs;
+
+ node = of_parse_phandle(mac_node, "pcsphy-handle", index);
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ pcs = lynx_pcs_create_fwnode(of_fwnode_handle(node));
+ of_node_put(node);
+
+ return pcs;
+}
+
+static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface)
+{
+ /* If there's no serdes device, assume that it's been configured for
+ * whatever the default interface mode is.
+ */
+ if (!mac_dev->fman_mac->serdes)
+ return mac_dev->phy_if == iface;
+ /* Otherwise, ask the serdes */
+ return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET,
+ iface, NULL);
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct phylink_pcs *pcs;
+ struct fman_mac *memac;
+ unsigned long capabilities;
+ unsigned long *supported;
+
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
+ mac_dev->phylink_ops = &memac_mac_ops;
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+ mac_dev->get_pause_stats = memac_get_pause_stats;
+ mac_dev->get_rmon_stats = memac_get_rmon_stats;
+ mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats;
+ mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac)
+ return -EINVAL;
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
+
+ err = of_property_match_string(mac_node, "pcs-handle-names", "xfi");
+ if (err >= 0) {
+ memac->xfi_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->xfi_pcs)) {
+ err = PTR_ERR(memac->xfi_pcs);
+ dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n");
+ goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
+ }
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
- if (!memac->pcsphy) {
- pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii");
+ if (err >= 0) {
+ memac->qsgmii_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->qsgmii_pcs)) {
+ err = PTR_ERR(memac->qsgmii_pcs);
+ dev_err_probe(mac_dev->dev, err,
+ "missing qsgmii pcs\n");
+ goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
}
- return memac;
+ /* For compatibility, if pcs-handle-names is missing, we assume this
+ * phy is the first one in pcsphy-handle
+ */
+ err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii");
+ if (err == -EINVAL || err == -ENODATA)
+ pcs = memac_pcs_create(mac_node, 0);
+ else if (err < 0)
+ goto _return_fm_mac_free;
+ else
+ pcs = memac_pcs_create(mac_node, err);
+
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err_probe(mac_dev->dev, err, "missing pcs\n");
+ goto _return_fm_mac_free;
+ }
+
+ /* If err is set here, it means that pcs-handle-names was missing above
+ * (and therefore that xfi_pcs cannot be set). If we are defaulting to
+ * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
+ */
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
+ memac->xfi_pcs = pcs;
+ else
+ memac->sgmii_pcs = pcs;
+
+ memac->serdes = devm_of_phy_optional_get(mac_dev->dev, mac_node,
+ "serdes");
+ if (!memac->serdes) {
+ dev_dbg(mac_dev->dev, "could not get (optional) serdes\n");
+ } else if (IS_ERR(memac->serdes)) {
+ err = PTR_ERR(memac->serdes);
+ goto _return_fm_mac_free;
+ }
+
+ /* TODO: The following interface modes are supported by (some) hardware
+ * but not by this driver:
+ * - 1000BASE-KX
+ * - 10GBASE-KR
+ * - XAUI/HiGig
+ */
+ supported = mac_dev->phylink_config.supported_interfaces;
+
+ /* Note that half duplex is only supported on 10/100M interfaces. */
+
+ if (memac->sgmii_pcs &&
+ (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) ||
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ }
+
+ if (memac->sgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ if (memac->qsgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII))
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, supported);
+ else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII)
+ dev_warn(mac_dev->dev, "no QSGMII pcs specified\n");
+
+ if (memac->xfi_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ } else {
+ /* From what I can tell, no 10g macs support RGMII. */
+ phy_interface_set_rgmii(supported);
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100;
+ capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD;
+
+ /* These SoCs don't support half duplex at all; there's no different
+ * FMan version or compatible, so we just have to check the machine
+ * compatible instead
+ */
+ if (of_machine_is_compatible("fsl,ls1043a") ||
+ of_machine_is_compatible("fsl,ls1046a") ||
+ of_machine_is_compatible("fsl,B4QDS"))
+ capabilities &= ~(MAC_10HD | MAC_100HD);
+
+ mac_dev->phylink_config.mac_capabilities = capabilities;
+
+ /* The T2080 and T4240 don't support half duplex RGMII. There is no
+ * other way to identify these SoCs, so just use the machine
+ * compatible.
+ */
+ if (of_machine_is_compatible("fsl,T2080QDS") ||
+ of_machine_is_compatible("fsl,T2080RDB") ||
+ of_machine_is_compatible("fsl,T2081QDS") ||
+ of_machine_is_compatible("fsl,T4240QDS") ||
+ of_machine_is_compatible("fsl,T4240RDB"))
+ memac->rgmii_no_half_duplex = true;
+
+ /* Most boards should use MLO_AN_INBAND, but existing boards don't have
+ * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY.
+	 * Phylink will allow this to be overridden by a fixed link. We need to
+	 * be careful not to enable this if we are using MII or RGMII, since
+	 * those configuration modes don't use in-band autonegotiation.
+ */
+ if (!of_property_present(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(mac_dev->phy_if))
+ mac_dev->phylink_config.default_an_inband = true;
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ return 0;
+
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+ return err;
}
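
The memac_read64() helper added above guards against torn reads of the 64-bit statistics counters, which the hardware exposes as two 32-bit registers: it reads the high word, then the low word, then the high word again, and retries if the two high reads differ (meaning a carry propagated in between). A standalone plain-C sketch of the same guard, with illustrative names:

	#include <stdint.h>

	static uint64_t read_split_counter64(const volatile uint32_t *lo,
					     const volatile uint32_t *hi)
	{
		uint32_t h, l;

		do {
			h = *hi;	/* sample the high word */
			l = *lo;	/* then the low word */
		} while (*hi != h);	/* high word moved: retry */

		return ((uint64_t)h << 32) | l;
	}
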
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
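
One detail worth calling out from the fman_memac.c hunks above: memac_set_tx_pause_frames() packs two 16-bit per-class values into each 32-bit PAUSE_QUANTA/PAUSE_THRESH register, with priority / 2 selecting the register and priority % 2 selecting the half-word. A standalone sketch of that packing (illustrative name, not driver code):

	#include <stdint.h>

	/* Even priorities occupy bits 15:0, odd priorities bits 31:16. */
	static uint32_t pack_pause_quanta(uint32_t reg, unsigned int priority,
					  uint16_t pause_time)
	{
		unsigned int shift = 16 * (priority % 2);

		reg &= ~(0xffffu << shift);	/* clear this class's half */
		return reg | ((uint32_t)pause_time << shift);
	}

For example, pack_pause_quanta(0x12340000, 0, 0xffff) preserves the odd class's value in the upper half and yields 0x1234ffff.
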
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..1ed245a2ee01 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
@@ -39,7 +12,6 @@
struct muram_info {
struct gen_pool *pool;
void __iomem *vbase;
- size_t size;
phys_addr_t pbase;
};
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index d9baac0dbc7d..e977389f7088 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,38 +1,12 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -1013,7 +987,7 @@ static int init_low_level_driver(struct fman_port *port)
return -ENODEV;
}
- /* The code bellow is a trick so the FM will not release the buffer
+ /* The code below is a trick so the FM will not release the buffer
* to BM nor will try to enqueue the frame to QM
*/
if (port->port_type == FMAN_PORT_TYPE_TX) {
@@ -1774,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct resource res;
struct resource *dev_res;
u32 val;
- int err = 0, lenp;
+ int err = 0;
enum fman_port_type port_type;
u16 port_speed;
u8 port_id;
@@ -1805,7 +1779,7 @@ static int fman_port_probe(struct platform_device *of_dev)
fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1787,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
__func__, port_node);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port_id = (u8)val;
port->dts_params.id = port_id;
@@ -1821,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev)
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1834,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1847,7 +1821,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else {
dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.type = port_type;
@@ -1861,7 +1835,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
__func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.qman_channel_id = qman_channel_id;
}
@@ -1871,7 +1845,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
__func__);
err = -ENOMEM;
- goto return_err;
+ goto put_device;
}
port->dts_params.fman = fman;
@@ -1896,6 +1870,8 @@ static int fman_port_probe(struct platform_device *of_dev)
return 0;
+put_device:
+ put_device(&fm_pdev->dev);
return_err:
of_node_put(port_node);
free_port:
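
Besides switching the boolean "fsl,fman-10g-port" lookups to of_property_read_bool(), the fman_port.c hunks above fix a reference leak: every failure after of_find_device_by_node() now unwinds through the new put_device label before the older ones. A compressed sketch of the resulting order, where do_port_setup() is a hypothetical stand-in for the cell-index/type/resource parsing:

	static int example_port_probe(struct device_node *port_node,
				      struct device_node *fm_node)
	{
		struct platform_device *fm_pdev;
		int err;

		fm_pdev = of_find_device_by_node(fm_node);	/* takes a ref */
		if (!fm_pdev)
			return -ENODEV;

		err = do_port_setup(port_node);	/* hypothetical helper */
		if (err)
			goto put_device;	/* drop the ref on failure */

		return 0;

	put_device:
		put_device(&fm_pdev->dev);
		return err;
	}
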
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..fecfca6eba03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,44 +1,19 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/crc32.h>
+#include <linux/netdevice.h>
/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
@@ -206,7 +181,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -269,10 +244,6 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
static int check_init_parameters(struct fman_mac *tgec)
{
- if (tgec->max_speed < SPEED_10000) {
- pr_err("10G MAC driver only support 10G speed\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -410,131 +381,116 @@ static void free_init_resources(struct fman_mac *tgec)
tgec->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct tgec_cfg *cfg)
+static int tgec_enable(struct fman_mac *tgec)
{
- /* Checks if tGEC driver parameters were initialized */
- if (!cfg)
- return true;
+ return 0;
+}
- return false;
+static void tgec_disable(struct fman_mac *tgec)
+{
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
- u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
- iowrite32be(tmp, &regs->command_config);
+ iowrite32be((u32)pause_time, &regs->pause_quant);
return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (new_val)
- tmp |= CMD_CFG_PROMIS_EN;
+ if (!en)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
else
- tmp &= ~CMD_CFG_PROMIS_EN;
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
+static void tgec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
}
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static void tgec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *tgec = mac_dev->fman_mac;
struct tgec_regs __iomem *regs = tgec->regs;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ tgec_set_tx_pause_frames(tgec, 0, pause_time, 0);
+ tgec_accept_rx_pause_frames(tgec, rx_pause);
+ mac_dev->update_speed(mac_dev, speed);
- iowrite32be((u32)pause_time, &regs->pause_quant);
-
- return 0;
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static void tgec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
+ struct fman_mac *tgec = fman_config_to_mac(config)->fman_mac;
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (!en)
- tmp |= CMD_CFG_PAUSE_IGNORE;
- else
- tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
-{
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+static const struct phylink_mac_ops tgec_mac_ops = {
+ .mac_config = tgec_mac_config,
+ .mac_link_up = tgec_link_up,
+ .mac_link_down = tgec_link_down,
+};
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
+{
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -562,14 +518,11 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
iowrite32be(entry | TGEC_HASH_MCAST_EN,
@@ -585,14 +538,11 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (enable)
@@ -605,7 +555,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -613,9 +564,6 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ((*(u64 *)eth_addr) >> 16);
/* CRC calculation */
@@ -642,27 +590,12 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
-{
- struct tgec_regs __iomem *regs = tgec->regs;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
-}
-
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -681,15 +614,12 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
int err;
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -764,7 +694,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +704,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the 10G MAC data structure. */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,9 +727,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
- tgec->max_speed = params->max_speed;
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
TGEC_IMASK_REM_FAULT |
@@ -819,7 +747,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +755,62 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->phylink_ops = &tgec_mac_ops;
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * XAUI, so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI;
+
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ mac_dev->phylink_config.supported_interfaces);
+ mac_dev->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD;
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
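tgec_initialization() is now the single entry point for this MAC flavour: it fills in mac_dev's method pointers, publishes tgec_mac_ops as the phylink callbacks, and folds the old enable/disable-on-start/stop behaviour into mac_link_up()/mac_link_down(). A minimal sketch of how a consumer is expected to turn the resulting config/ops pair into a phylink instance (illustrative wrapper, error handling elided):

#include <linux/phylink.h>
#include "mac.h"	/* struct mac_device, filled in by tgec_initialization() */

static struct phylink *example_attach(struct mac_device *mac_dev,
				      struct fwnode_handle *fwnode)
{
	/* supported_interfaces and mac_capabilities were set above */
	return phylink_create(&mac_dev->phylink_config, fwnode,
			      mac_dev->phy_if, mac_dev->phylink_ops);
}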
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d9fc5c456bf3..f27ff625fe29 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -40,9 +14,9 @@
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
+#include <linux/platform_device.h>
#include "mac.h"
#include "fman_mac.h"
@@ -54,20 +28,10 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
- /* List of multicast addresses */
- struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,475 +39,22 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
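Threading struct mac_device through directly means mac_exception() no longer needs the void *handle cookie-and-cast idiom. An illustrative before/after of the callback shape (simplified types, not the driver's exact typedef):

#include <linux/device.h>

struct example_mac { struct device *dev; };

/* Before: opaque cookie, cast back at every use. */
static void old_style_cb(void *handle, int ex)
{
	struct example_mac *mac = handle;	/* unchecked cast */

	dev_err(mac->dev, "exception %d\n", ex);
}

/* After: the prototype itself carries the context type. */
static void new_style_cb(struct example_mac *mac, int ex)
{
	dev_err(mac->dev, "exception %d\n", ex);
}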
-static void set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv;
- struct mac_address *old_addr, *tmp;
- struct netdev_hw_addr *ha;
- int err;
- enet_addr_t *addr;
-
- priv = mac_dev->priv;
-
- /* Clear previous address list */
- list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
- addr = (enet_addr_t *)old_addr->addr;
- err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- list_del(&old_addr->list);
- kfree(old_addr);
- }
-
- /* Add all the addresses from the new list */
- netdev_for_each_mc_addr(ha, net_dev) {
- addr = (enet_addr_t *)ha->addr;
- err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
- if (err < 0)
- return err;
-
- tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- ether_addr_copy(tmp->addr, ha->addr);
- list_add(&tmp->list, &priv->mc_addr_list);
- }
- return 0;
-}
-
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Avoid redundant calls to FMD, if the MAC driver already contains the desired
- * active PAUSE settings. Otherwise, the new active settings should be reflected
- * in FMan.
- *
- * Return: 0 on success; Error code otherwise.
- */
-int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-{
- struct fman_mac *fman_mac = mac_dev->fman_mac;
- int err = 0;
-
- if (rx != mac_dev->rx_pause_active) {
- err = mac_dev->set_rx_pause(fman_mac, rx);
- if (likely(err == 0))
- mac_dev->rx_pause_active = rx;
- }
-
- if (tx != mac_dev->tx_pause_active) {
- u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
- FSL_FM_PAUSE_TIME_DISABLE);
-
- err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
-
- if (likely(err == 0))
- mac_dev->tx_pause_active = tx;
- }
-
- return err;
-}
-EXPORT_SYMBOL(fman_set_mac_active_pause);
-
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx_pause: Return value for RX setting
- * @tx_pause: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings based on PHY
- * autonegotiation or values set by ethtool.
- *
- * Return: Pointer to FMan device.
- */
-void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
- bool *tx_pause)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- u16 lcl_adv, rmt_adv;
- u8 flowctrl;
-
- *rx_pause = *tx_pause = false;
-
- if (!phy_dev->duplex)
- return;
-
- /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
- * are those set by ethtool.
- */
- if (!mac_dev->autoneg_pause) {
- *rx_pause = mac_dev->rx_pause_req;
- *tx_pause = mac_dev->tx_pause_req;
- return;
- }
-
- /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
- * settings depend on the result of the link negotiation.
- */
-
- /* get local capabilities */
- lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
-
- /* get link partner capabilities */
- rmt_adv = 0;
- if (phy_dev->pause)
- rmt_adv |= LPA_PAUSE_CAP;
- if (phy_dev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
-
- /* Calculate TX/RX settings based on local and peer advertised
- * symmetric/asymmetric PAUSE capabilities.
- */
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
- if (flowctrl & FLOW_CTRL_RX)
- *rx_pause = true;
- if (flowctrl & FLOW_CTRL_TX)
- *tx_pause = true;
-}
-EXPORT_SYMBOL(fman_get_pause_cfg);
-
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
-#define DTSEC_SUPPORTED \
- (SUPPORTED_10baseT_Half \
- | SUPPORTED_10baseT_Full \
- | SUPPORTED_100baseT_Half \
- | SUPPORTED_100baseT_Full \
- | SUPPORTED_Autoneg \
- | SUPPORTED_Pause \
- | SUPPORTED_Asym_Pause \
- | SUPPORTED_FIBRE \
- | SUPPORTED_MII)
-
static DEFINE_MUTEX(eth_lock);
-static const u16 phy2speed[] = {
- [PHY_INTERFACE_MODE_MII] = SPEED_100,
- [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_RMII] = SPEED_100,
- [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
-};
-
static struct platform_device *dpaa_eth_add_device(int fman_id,
struct mac_device *mac_dev)
{
@@ -566,7 +77,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -590,9 +101,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
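The match table now carries each MAC flavour's init routine in its .data member, which lets mac_probe() replace the old chain of of_device_is_compatible() tests with a single lookup. A minimal sketch of the dispatch (illustrative; the real probe below stores the pointer and calls it once the hardware is mapped):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct mac_device;
struct device_node;
struct fman_mac_params;

static int example_probe(struct platform_device *pdev)
{
	int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
		    struct fman_mac_params *params);

	/* Returns the .data member of the matched mac_match[] entry. */
	init = of_device_get_match_data(&pdev->dev);
	if (!init)	/* defensive; every entry above supplies one */
		return -EINVAL;

	return 0;
}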
@@ -600,60 +111,40 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(_of_dev, mac_dev);
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
-
- INIT_LIST_HEAD(&priv->mc_addr_list);
+ mac_dev->dev = dev;
/* Get the FM node */
dev_node = of_get_parent(mac_node);
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -662,55 +153,59 @@ static int mac_probe(struct platform_device *_of_dev)
err = -EINVAL;
goto _return_of_node_put;
}
+ mac_dev->fman_dev = &of_dev->dev;
/* Get the FMan cell-index */
err = of_property_read_u32(dev_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
err = -EINVAL;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
/* cell-index 0 => FMan id 1 */
fman_id = (u8)(val + 1);
- priv->fman = fman_bind(&of_dev->dev);
+ priv->fman = fman_bind(mac_dev->fman_dev);
if (!priv->fman) {
dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
err = -ENODEV;
- goto _return_of_node_put;
+ goto _return_dev_put;
}
+ /* Two references have been taken in of_find_device_by_node()
+ * and fman_bind(). Release one of them here. The second one
+ * will be released in mac_remove().
+ */
+ put_device(mac_dev->fman_dev);
of_node_put(dev_node);
+ dev_node = NULL;
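To make the reference accounting above concrete: of_find_device_by_node() and fman_bind() each take a get_device() reference on the FMan, one is dropped here, and the survivor is dropped in mac_remove(). A minimal sketch of the pairing (illustrative; assumes, per the comment above, that fman_bind() takes its own reference):

#include <linux/of_platform.h>
#include "fman.h"	/* fman_bind() */
#include "mac.h"	/* struct mac_device */

static int example_bind(struct device_node *fm_node,
			struct mac_device *mac_dev)
{
	struct platform_device *of_dev;
	struct fman *fman;

	of_dev = of_find_device_by_node(fm_node);	/* reference #1 */
	if (!of_dev)
		return -EINVAL;
	mac_dev->fman_dev = &of_dev->dev;

	fman = fman_bind(mac_dev->fman_dev);		/* reference #2 */
	if (!fman) {
		put_device(mac_dev->fman_dev);		/* undo #1 */
		return -ENODEV;
	}

	put_device(mac_dev->fman_dev);	/* drop #1; #2 lives until remove() */
	return 0;
}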
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+ if (!mac_dev->res) {
+ dev_err(dev, "could not get registers\n");
+ err = -EINVAL;
+ goto _return_dev_put;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+ mac_dev->res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ goto _return_dev_put;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
- goto _return_of_get_parent;
+ goto _return_dev_put;
}
if (!of_device_is_available(mac_node)) {
err = -ENODEV;
- goto _return_of_get_parent;
+ goto _return_dev_put;
}
/* Get the cell-index */
@@ -718,7 +213,12 @@ static int mac_probe(struct platform_device *_of_dev)
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
err = -EINVAL;
- goto _return_of_get_parent;
+ goto _return_dev_put;
+ }
+ if (val >= MAX_NUM_OF_MACS) {
+ dev_err(dev, "cell-index value is too big for %pOF\n", mac_node);
+ err = -EINVAL;
+ goto _return_dev_put;
}
priv->cell_index = (u8)val;
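The register mapping above swaps a hand-rolled of_address_to_resource() plus __devm_request_region() pair for the standard platform helpers, and the mapped address now lives in mac_dev->vaddr rather than in the private struct. A minimal sketch of the resulting devres pattern (illustrative helper; the parent region would come from fman_get_mem_region()):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *example_map_regs(struct platform_device *pdev,
				      struct resource *parent)
{
	struct device *dev = &pdev->dev;
	struct resource *res;

	res = platform_get_mem_or_io(pdev, 0);	/* MMIO resource 0 */
	if (!res)
		return NULL;

	/* Claim the range inside the FMan's parent region. */
	if (devm_request_resource(dev, parent, res))
		return NULL;

	/* Unmapped automatically when the driver detaches. */
	return devm_ioremap(dev, res->start, resource_size(res));
}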
@@ -733,24 +233,25 @@ static int mac_probe(struct platform_device *_of_dev)
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
err = nph;
- goto _return_of_get_parent;
+ goto _return_dev_put;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
err = -EINVAL;
- goto _return_of_get_parent;
+ goto _return_dev_put;
}
- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
+ /* PORT_NUM determines the size of the port array */
+ for (i = 0; i < PORT_NUM; i++) {
/* Find the port node */
dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
err = -EINVAL;